[Git][java-team/netty][bookworm] 2 commits: Refresh patches
Bastien Roucariès (@rouca)
gitlab at salsa.debian.org
Sat Nov 29 11:53:11 GMT 2025
Bastien Roucariès pushed to branch bookworm at Debian Java Maintainers / netty
Commits:
7bf95944 by Bastien Roucariès at 2025-11-29T12:50:29+01:00
Refresh patches
- - - - -
5670a389 by Bastien Roucariès at 2025-11-29T12:52:40+01:00
Finalize changelog
- - - - -
8 changed files:
- debian/changelog
- + debian/patches/CVE-2024-29025.patch
- debian/patches/CVE-2025-55163_1.patch
- debian/patches/CVE-2025-55163_before-1.patch
- debian/patches/CVE-2025-58056.patch
- + debian/patches/CVE-2025-58057.patch
- debian/patches/CVE-2025-59419 → debian/patches/CVE-2025-59419.patch
- debian/patches/series
Changes:
=====================================
debian/changelog
=====================================
@@ -1,6 +1,15 @@
netty (1:4.1.48-7+deb12u2) bookworm-security; urgency=medium
* Team upload
+ * Fix CVE-2024-29025 (Closes: #1068110)
+ The `HttpPostRequestDecoder` can be tricked to accumulate data.
+ While the decoder can store items on the disk if configured so,
+ there are no limits to the number of fields the form can have,
+ an attacker can send a chunked post consisting of many small
+ fields that will be accumulated in the `bodyListHttpData` list.
+ The decoder accumulates bytes in the `undecodedChunk` buffer
+ until it can decode a field; this field can accumulate data
+ without limits.
* Fix CVE-2025-55163 (Closes: #1111105)
Netty is vulnerable to MadeYouReset DDoS.
This is a logical vulnerability in the HTTP/2 protocol,
@@ -15,6 +24,15 @@ netty (1:4.1.48-7+deb12u2) bookworm-security; urgency=medium
no limit in how often it calls pull, decompressing
data 64K bytes at a time. The buffers are saved in
the output list, and remain reachable until OOM is hit.
+ * Fix CVE-2025-58057:
+ When supplied with specially crafted input, BrotliDecoder
+ and certain other decompression decoders will allocate
+ a large number of reachable byte buffers, which can lead
+ to denial of service. BrotliDecoder.decompress has no limit
+ in how often it calls pull, decompressing data 64K bytes at
+ a time. The buffers are saved in the output list, and remain
+ reachable until OOM is hit.
+ (Closes: #1113994)
* Fix CVE-2025-59419 (Closes: #1118282)
SMTP Command Injection Vulnerability Allowing Email Forgery
An SMTP Command Injection (CRLF Injection) vulnerability
@@ -25,7 +43,7 @@ netty (1:4.1.48-7+deb12u2) bookworm-security; urgency=medium
be used to impersonate executives and forge high-stakes
corporate communications.
- -- Bastien Roucariès <rouca at debian.org> Sat, 15 Nov 2025 10:19:43 +0100
+ -- Bastien Roucariès <rouca at debian.org> Sat, 29 Nov 2025 12:49:01 +0100
netty (1:4.1.48-7+deb12u1) bookworm-security; urgency=high
=====================================
debian/patches/CVE-2024-29025.patch
=====================================
@@ -0,0 +1,400 @@
+From: Markus Koschany <apo at debian.org>
+Date: Sun, 12 May 2024 21:17:23 +0200
+Subject: CVE-2024-29025
+
+Upstream-Advisory: https://github.com/netty/netty/security/advisories/GHSA-5jpm-x58v-624v
+Origin: https://github.com/netty/netty/commit/0d0c6ed782d13d423586ad0c71737b2c7d02058c
+---
+ .../multipart/HttpPostMultipartRequestDecoder.java | 41 ++++++++
+ .../http/multipart/HttpPostRequestDecoder.java | 69 ++++++++++++++
+ .../multipart/HttpPostStandardRequestDecoder.java | 44 +++++++++
+ .../http/multipart/HttpPostRequestDecoderTest.java | 103 +++++++++++++++++++++
+ 4 files changed, 257 insertions(+)
+
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java
+index 4d59e4d..8a7d679 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostMultipartRequestDecoder.java
+@@ -62,6 +62,16 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
+ */
+ private final HttpRequest request;
+
++ /**
++ * The maximum number of fields allows by the form
++ */
++ private final int maxFields;
++
++ /**
++ * The maximum number of accumulated bytes when decoding a field
++ */
++ private final int maxBufferedBytes;
++
+ /**
+ * Default charset to use
+ */
+@@ -173,9 +183,34 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
+ * errors
+ */
+ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
++ this(factory, request, charset, HttpPostRequestDecoder.DEFAULT_MAX_FIELDS, HttpPostRequestDecoder.DEFAULT_MAX_BUFFERED_BYTES);
++ }
++
++ /**
++ *
++ * @param factory
++ * the factory used to create InterfaceHttpData
++ * @param request
++ * the request to decode
++ * @param charset
++ * the charset to use as default
++ * @param maxFields
++ * the maximum number of fields the form can have, {@code -1} to disable
++ * @param maxBufferedBytes
++ * the maximum number of bytes the decoder can buffer when decoding a field, {@code -1} to disable
++ * @throws NullPointerException
++ * for request or charset or factory
++ * @throws ErrorDataDecoderException
++ * if the default charset was wrong when decoding or other
++ * errors
++ */
++ public HttpPostMultipartRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset,
++ int maxFields, int maxBufferedBytes) {
+ this.request = checkNotNull(request, "request");
+ this.charset = checkNotNull(charset, "charset");
+ this.factory = checkNotNull(factory, "factory");
++ this.maxFields = maxFields;
++ this.maxBufferedBytes = maxBufferedBytes;
+ // Fill default values
+
+ setMultipart(this.request.headers().get(HttpHeaderNames.CONTENT_TYPE));
+@@ -334,6 +369,9 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
+ isLastChunk = true;
+ }
+ parseBody();
++ if (maxBufferedBytes > 0 && undecodedChunk != null && undecodedChunk.readableBytes() > maxBufferedBytes) {
++ throw new HttpPostRequestDecoder.TooLongFormFieldException();
++ }
+ if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+ undecodedChunk.discardReadBytes();
+ }
+@@ -418,6 +456,9 @@ public class HttpPostMultipartRequestDecoder implements InterfaceHttpPostRequest
+ if (data == null) {
+ return;
+ }
++ if (maxFields > 0 && bodyListHttpData.size() >= maxFields) {
++ throw new HttpPostRequestDecoder.TooManyFormFieldsException();
++ }
+ List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
+ if (datas == null) {
+ datas = new ArrayList<InterfaceHttpData>(1);
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java
+index 0183b23..7812f3e 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoder.java
+@@ -37,6 +37,10 @@ public class HttpPostRequestDecoder implements InterfaceHttpPostRequestDecoder {
+
+ static final int DEFAULT_DISCARD_THRESHOLD = 10 * 1024 * 1024;
+
++ static final int DEFAULT_MAX_FIELDS = 128;
++
++ static final int DEFAULT_MAX_BUFFERED_BYTES = 1024;
++
+ private final InterfaceHttpPostRequestDecoder decoder;
+
+ /**
+@@ -53,6 +57,25 @@ public class HttpPostRequestDecoder implements InterfaceHttpPostRequestDecoder {
+ this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET);
+ }
+
++ /**
++ *
++ * @param request
++ * the request to decode
++ * @param maxFields
++ * the maximum number of fields the form can have, {@code -1} to disable
++ * @param maxBufferedBytes
++ * the maximum number of bytes the decoder can buffer when decoding a field, {@code -1} to disable
++ * @throws NullPointerException
++ * for request
++ * @throws ErrorDataDecoderException
++ * if the default charset was wrong when decoding or other
++ * errors
++ */
++ public HttpPostRequestDecoder(HttpRequest request, int maxFields, int maxBufferedBytes) {
++ this(new DefaultHttpDataFactory(DefaultHttpDataFactory.MINSIZE), request, HttpConstants.DEFAULT_CHARSET,
++ maxFields, maxBufferedBytes);
++ }
++
+ /**
+ *
+ * @param factory
+@@ -96,6 +119,38 @@ public class HttpPostRequestDecoder implements InterfaceHttpPostRequestDecoder {
+ }
+ }
+
++ /**
++ *
++ * @param factory
++ * the factory used to create InterfaceHttpData
++ * @param request
++ * the request to decode
++ * @param charset
++ * the charset to use as default
++ * @param maxFields
++ * the maximum number of fields the form can have, {@code -1} to disable
++ * @param maxBufferedBytes
++ * the maximum number of bytes the decoder can buffer when decoding a field, {@code -1} to disable
++ * @throws NullPointerException
++ * for request or charset or factory
++ * @throws ErrorDataDecoderException
++ * if the default charset was wrong when decoding or other
++ * errors
++ */
++ public HttpPostRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset,
++ int maxFields, int maxBufferedBytes) {
++ ObjectUtil.checkNotNull(factory, "factory");
++ ObjectUtil.checkNotNull(request, "request");
++ ObjectUtil.checkNotNull(charset, "charset");
++
++ // Fill default values
++ if (isMultipart(request)) {
++ decoder = new HttpPostMultipartRequestDecoder(factory, request, charset, maxFields, maxBufferedBytes);
++ } else {
++ decoder = new HttpPostStandardRequestDecoder(factory, request, charset, maxFields, maxBufferedBytes);
++ }
++ }
++
+ /**
+ * states follow NOTSTARTED PREAMBLE ( (HEADERDELIMITER DISPOSITION (FIELD |
+ * FILEUPLOAD))* (HEADERDELIMITER DISPOSITION MIXEDPREAMBLE (MIXEDDELIMITER
+@@ -338,4 +393,18 @@ public class HttpPostRequestDecoder implements InterfaceHttpPostRequestDecoder {
+ super(msg, cause);
+ }
+ }
++
++ /**
++ * Exception when the maximum number of fields for a given form is reached
++ */
++ public static final class TooManyFormFieldsException extends DecoderException {
++ private static final long serialVersionUID = 1336267941020800769L;
++ }
++
++ /**
++ * Exception when a field content is too long
++ */
++ public static final class TooLongFormFieldException extends DecoderException {
++ private static final long serialVersionUID = 1336267941020800769L;
++ }
+ }
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java
+index 7b94a7c..47f9315 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/multipart/HttpPostStandardRequestDecoder.java
+@@ -16,6 +16,7 @@
+ package io.netty.handler.codec.http.multipart;
+
+ import io.netty.buffer.ByteBuf;
++import io.netty.handler.codec.DecoderException;
+ import io.netty.handler.codec.http.HttpConstants;
+ import io.netty.handler.codec.http.HttpContent;
+ import io.netty.handler.codec.http.HttpRequest;
+@@ -26,6 +27,8 @@ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.EndOfDataDec
+ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.ErrorDataDecoderException;
+ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.MultiPartStatus;
+ import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.NotEnoughDataDecoderException;
++import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.TooManyFormFieldsException;
++import io.netty.handler.codec.http.multipart.HttpPostRequestDecoder.TooLongFormFieldException;
+ import io.netty.util.internal.PlatformDependent;
+
+ import java.io.IOException;
+@@ -61,6 +64,16 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
+ */
+ private final Charset charset;
+
++ /**
++ * The maximum number of fields allows by the form
++ */
++ private final int maxFields;
++
++ /**
++ * The maximum number of accumulated bytes when decoding a field
++ */
++ private final int maxBufferedBytes;
++
+ /**
+ * Does the last chunk already received
+ */
+@@ -146,9 +159,34 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
+ * errors
+ */
+ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset) {
++ this(factory, request, charset, HttpPostRequestDecoder.DEFAULT_MAX_FIELDS, HttpPostRequestDecoder.DEFAULT_MAX_BUFFERED_BYTES);
++ }
++
++ /**
++ *
++ * @param factory
++ * the factory used to create InterfaceHttpData
++ * @param request
++ * the request to decode
++ * @param charset
++ * the charset to use as default
++ * @param maxFields
++ * the maximum number of fields the form can have, {@code -1} to disable
++ * @param maxBufferedBytes
++ * the maximum number of bytes the decoder can buffer when decoding a field, {@code -1} to disable
++ * @throws NullPointerException
++ * for request or charset or factory
++ * @throws ErrorDataDecoderException
++ * if the default charset was wrong when decoding or other
++ * errors
++ */
++ public HttpPostStandardRequestDecoder(HttpDataFactory factory, HttpRequest request, Charset charset,
++ int maxFields, int maxBufferedBytes) {
+ this.request = checkNotNull(request, "request");
+ this.charset = checkNotNull(charset, "charset");
+ this.factory = checkNotNull(factory, "factory");
++ this.maxFields = maxFields;
++ this.maxBufferedBytes = maxBufferedBytes;
+ try {
+ if (request instanceof HttpContent) {
+ // Offer automatically if the given request is als type of HttpContent
+@@ -293,6 +331,9 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
+ isLastChunk = true;
+ }
+ parseBody();
++ if (maxBufferedBytes > 0 && undecodedChunk != null && undecodedChunk.readableBytes() > maxBufferedBytes) {
++ throw new TooLongFormFieldException();
++ }
+ if (undecodedChunk != null && undecodedChunk.writerIndex() > discardThreshold) {
+ undecodedChunk.discardReadBytes();
+ }
+@@ -373,6 +414,9 @@ public class HttpPostStandardRequestDecoder implements InterfaceHttpPostRequestD
+ if (data == null) {
+ return;
+ }
++ if (maxFields > 0 && bodyListHttpData.size() >= maxFields) {
++ throw new TooManyFormFieldsException();
++ }
+ List<InterfaceHttpData> datas = bodyMapHttpData.get(data.getName());
+ if (datas == null) {
+ datas = new ArrayList<InterfaceHttpData>(1);
+diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java
+index 40771e0..bbd43be 100644
+--- a/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java
++++ b/codec-http/src/test/java/io/netty/handler/codec/http/multipart/HttpPostRequestDecoderTest.java
+@@ -19,6 +19,7 @@ import io.netty.buffer.ByteBuf;
+ import io.netty.buffer.ByteBufAllocator;
+ import io.netty.buffer.Unpooled;
+ import io.netty.buffer.UnpooledByteBufAllocator;
++import io.netty.handler.codec.DecoderException;
+ import io.netty.handler.codec.DecoderResult;
+ import io.netty.handler.codec.http.DefaultFullHttpRequest;
+ import io.netty.handler.codec.http.DefaultHttpContent;
+@@ -803,4 +804,106 @@ public class HttpPostRequestDecoderTest {
+ decoder.destroy();
+ req.release();
+ }
++
++ @Test
++ public void testTooManyFormFieldsPostStandardDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, 1024, -1);
++
++ int num = 0;
++ while (true) {
++ try {
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer("foo=bar&".getBytes())));
++ } catch (DecoderException e) {
++ assertEquals(HttpPostRequestDecoder.TooManyFormFieldsException.class, e.getClass());
++ break;
++ }
++ assertTrue(num++ < 1024);
++ }
++ assertEquals(1024, num);
++ }
++
++ @Test
++ public void testTooManyFormFieldsPostMultipartDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++ req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f");
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, 1024, -1);
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer("--be38b42a9ad2713f\n".getBytes())));
++
++ int num = 0;
++ while (true) {
++ try {
++ byte[] bodyBytes = ("content-disposition: form-data; name=\"title\"\n" +
++ "content-length: 10\n" +
++ "content-type: text/plain; charset=UTF-8\n" +
++ "\n" +
++ "bar-stream\n" +
++ "--be38b42a9ad2713f\n").getBytes();
++ ByteBuf content = Unpooled.wrappedBuffer(bodyBytes);
++ decoder.offer(new DefaultHttpContent(content));
++ } catch (DecoderException e) {
++ assertEquals(HttpPostRequestDecoder.TooManyFormFieldsException.class, e.getClass());
++ break;
++ }
++ assertTrue(num++ < 1024);
++ }
++ assertEquals(1024, num);
++ }
++
++ @Test
++ public void testTooLongFormFieldStandardDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, -1, 16 * 1024);
++
++ try {
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(new byte[16 * 1024 + 1])));
++ fail();
++ } catch (DecoderException e) {
++ assertEquals(HttpPostRequestDecoder.TooLongFormFieldException.class, e.getClass());
++ }
++ }
++
++ @Test
++ public void testFieldGreaterThanMaxBufferedBytesStandardDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, -1, 6);
++
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer("foo=bar".getBytes())));
++ }
++
++ @Test
++ public void testTooLongFormFieldMultipartDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++ req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f");
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, -1, 16 * 1024);
++
++ try {
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(new byte[16 * 1024 + 1])));
++ fail();
++ } catch (DecoderException e) {
++ assertEquals(HttpPostRequestDecoder.TooLongFormFieldException.class, e.getClass());
++ }
++ }
++
++ @Test
++ public void testFieldGreaterThanMaxBufferedBytesMultipartDecoder() {
++ HttpRequest req = new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/");
++ req.headers().add("Content-Type", "multipart/form-data;boundary=be38b42a9ad2713f");
++
++ byte[] bodyBytes = ("content-disposition: form-data; name=\"title\"\n" +
++ "content-length: 10\n" +
++ "content-type: text/plain; charset=UTF-8\n" +
++ "\n" +
++ "bar-stream\n" +
++ "--be38b42a9ad2713f\n").getBytes();
++
++ HttpPostRequestDecoder decoder = new HttpPostRequestDecoder(req, -1, bodyBytes.length - 1);
++
++ decoder.offer(new DefaultHttpContent(Unpooled.wrappedBuffer(bodyBytes)));
++ }
+ }
=====================================
debian/patches/CVE-2025-55163_1.patch
=====================================
@@ -29,11 +29,9 @@ bug-github-pull: https://github.com/netty/netty/pull/15516
.../codec/http2/Http2ConnectionHandlerTest.java | 22 +++++++++++-----------
2 files changed, 14 insertions(+), 14 deletions(-)
-diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java
-index 909ca74..0dd73cb 100644
--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java
+++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2ConnectionHandler.java
-@@ -706,7 +706,7 @@ public class Http2ConnectionHandler extends ByteToMessageDecoder implements Http
+@@ -706,7 +706,7 @@
try {
stream = encoder.connection().remote().createStream(streamId, true);
} catch (Http2Exception e) {
@@ -42,7 +40,7 @@ index 909ca74..0dd73cb 100644
return;
}
}
-@@ -723,10 +723,10 @@ public class Http2ConnectionHandler extends ByteToMessageDecoder implements Http
+@@ -723,10 +723,10 @@
if (stream == null) {
if (!outbound || connection().local().mayHaveCreatedStream(streamId)) {
@@ -55,11 +53,9 @@ index 909ca74..0dd73cb 100644
}
}
-diff --git a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java
-index 0143edc..90b0ef6 100644
--- a/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java
+++ b/codec-http2/src/test/java/io/netty/handler/codec/http2/Http2ConnectionHandlerTest.java
-@@ -403,7 +403,7 @@ public class Http2ConnectionHandlerTest {
+@@ -403,7 +403,7 @@
when(connection.isServer()).thenReturn(true);
when(stream.isHeadersSent()).thenReturn(false);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
@@ -68,7 +64,7 @@ index 0143edc..90b0ef6 100644
eq(PROTOCOL_ERROR.code()), eq(promise))).thenReturn(future);
handler.exceptionCaught(ctx, e);
-@@ -413,7 +413,7 @@ public class Http2ConnectionHandlerTest {
+@@ -413,7 +413,7 @@
captor.capture(), eq(padding), eq(true), eq(promise));
Http2Headers headers = captor.getValue();
assertEquals(HttpResponseStatus.REQUEST_HEADER_FIELDS_TOO_LARGE.codeAsText(), headers.status());
@@ -77,7 +73,7 @@ index 0143edc..90b0ef6 100644
}
@Test
-@@ -427,14 +427,14 @@ public class Http2ConnectionHandlerTest {
+@@ -427,14 +427,14 @@
when(connection.isServer()).thenReturn(true);
when(stream.isHeadersSent()).thenReturn(false);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
@@ -94,7 +90,7 @@ index 0143edc..90b0ef6 100644
}
@Test
-@@ -448,14 +448,14 @@ public class Http2ConnectionHandlerTest {
+@@ -448,14 +448,14 @@
when(connection.isServer()).thenReturn(false);
when(stream.isHeadersSent()).thenReturn(false);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
@@ -111,7 +107,7 @@ index 0143edc..90b0ef6 100644
}
@Test
-@@ -484,14 +484,14 @@ public class Http2ConnectionHandlerTest {
+@@ -484,14 +484,14 @@
when(connection.isServer()).thenReturn(true);
when(stream.isHeadersSent()).thenReturn(true);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
@@ -128,7 +124,7 @@ index 0143edc..90b0ef6 100644
}
@Test
-@@ -508,15 +508,15 @@ public class Http2ConnectionHandlerTest {
+@@ -508,15 +508,15 @@
when(connection.isServer()).thenReturn(true);
when(stream.isHeadersSent()).thenReturn(false);
when(remote.lastStreamCreated()).thenReturn(STREAM_ID);
=====================================
debian/patches/CVE-2025-55163_before-1.patch
=====================================
@@ -1,7 +1,3 @@
-From: Debian Java Maintainers <pkg-java-maintainers at lists.alioth.debian.org>
-Date: Sat, 15 Nov 2025 10:14:12 +0100
-Subject: CVE-2025-55163_before-1
-
commit 9b80d081ff3478c46152b012ae0e21f939467ac3
Author: Norman Maurer <norman_maurer at apple.com>
Date: Sat Oct 28 20:49:44 2023 +0200
@@ -24,17 +20,10 @@ Only add limit where needed
origin: backport, https://github.com/netty/netty/commit/9b80d081ff3478c46152b012ae0e21f939467ac3
bug-github-pull: https://github.com/netty/netty/pull/13671
----
- .../http2/AbstractHttp2ConnectionHandlerBuilder.java | 19 ++++++++++++++++---
- .../handler/codec/http2/Http2FrameCodecBuilder.java | 4 ++--
- .../codec/http2/Http2MultiplexCodecBuilder.java | 4 ++--
- 3 files changed, 20 insertions(+), 7 deletions(-)
-diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java
-index a904310..fc70dfe 100644
--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java
+++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/AbstractHttp2ConnectionHandlerBuilder.java
-@@ -78,6 +78,8 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder<T extends Http2Conne
+@@ -78,6 +78,8 @@
private static final SensitivityDetector DEFAULT_HEADER_SENSITIVITY_DETECTOR = Http2HeadersEncoder.NEVER_SENSITIVE;
@@ -43,7 +32,7 @@ index a904310..fc70dfe 100644
// The properties that can always be set.
private Http2Settings initialSettings = Http2Settings.defaultSettings();
private Http2FrameListener frameListener;
-@@ -109,7 +111,7 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder<T extends Http2Conne
+@@ -109,7 +111,7 @@
private boolean autoAckPingFrame = true;
private int maxQueuedControlFrames = Http2CodecUtil.DEFAULT_MAX_QUEUED_CONTROL_FRAMES;
private int maxConsecutiveEmptyFrames = 2;
@@ -52,7 +41,7 @@ index a904310..fc70dfe 100644
private int secondsPerWindow = 30;
/**
-@@ -562,8 +564,19 @@ public abstract class AbstractHttp2ConnectionHandlerBuilder<T extends Http2Conne
+@@ -562,8 +564,19 @@
if (maxConsecutiveEmptyDataFrames > 0) {
decoder = new Http2EmptyDataFrameConnectionDecoder(decoder, maxConsecutiveEmptyDataFrames);
}
@@ -74,11 +63,9 @@ index a904310..fc70dfe 100644
}
final T handler;
try {
-diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java
-index 241c9c5..ab183e5 100644
--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java
+++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2FrameCodecBuilder.java
-@@ -179,8 +179,8 @@ public class Http2FrameCodecBuilder extends
+@@ -179,8 +179,8 @@
@Override
public Http2FrameCodecBuilder decoderEnforceMaxRstFramesPerWindow(
@@ -89,11 +76,9 @@ index 241c9c5..ab183e5 100644
}
/**
-diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java
-index a3c0bed..0a38d4e 100644
--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java
+++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/Http2MultiplexCodecBuilder.java
-@@ -208,8 +208,8 @@ public class Http2MultiplexCodecBuilder
+@@ -208,8 +208,8 @@
@Override
public Http2MultiplexCodecBuilder decoderEnforceMaxRstFramesPerWindow(
=====================================
debian/patches/CVE-2025-58056.patch
=====================================
@@ -1,48 +1,247 @@
-From: Norman Maurer <norman_maurer at apple.com>
-Date: Wed, 3 Sep 2025 10:35:05 +0200
-Subject: Merge commit from fork (#15612)
+From 39d3ecf8f0c57a7469ba927b2163d4cb4314b138 Mon Sep 17 00:00:00 2001
+From: Chris Vest <christianvest_hansen at apple.com>
+Date: Tue, 2 Sep 2025 23:25:09 -0700
+Subject: [PATCH] Merge commit from fork
+
+* Prevent HTTP request/response smuggling via chunk encoding
Motivation:
+Transfer-Encoding: chunked has some strict rules around parsing CR LF delimiters.
+If we are too lenient, it can cause request/response smuggling issues when combined with proxies that are lenient in different ways.
+See https://w4ke.info/2025/06/18/funky-chunks.html for the details.
-We should ensure our decompressing decoders will fire their buffers
-through the pipeliner as fast as possible and so allow the user to take
-ownership of these as fast as possible. This is needed to reduce the
-risk of OOME as otherwise a small input might produce a large amount of
-data that can't be processed until all the data was decompressed in a
-loop. Beside this we also should ensure that other handlers that uses
-these decompressors will not buffer all of the produced data before
-processing it, which was true for HTTP and HTTP2.
+Modification:
+- Make sure that we reject chunks with chunk-extensions that contain lone Line Feed octets without their preceding Carriage Return octet.
+- Make sure that we issue HttpContent objects with decoding failures, if we decode a chunk and it isn't immediately followed by a CR LF octet pair.
-Modifications:
+Result:
+Smuggling requests/responses is no longer possible.
-- Adjust affected decoders (Brotli, Zstd and ZLib) to fire buffers
- through the pipeline as soon as possible
-- Adjust HTTP / HTTP2 decompressors to do the same
-- Add testcase.
+Fixes https://github.com/netty/netty/issues/15522
-Result:
+* Enforce CR LF line separators for HTTP messages by default
+
+But also make it configurable through `HttpDecoderConfig`, and add a system property opt-out to change the default back.
+
+* Remove property for the name of the strict line parsing property
-Less risk of OOME when doing decompressing
+* Remove HeaderParser.parse overload that only takes a buffer argument
-Co-authored-by: yawkat <jonas.konrad at oracle.com>
-origin: backport, https://github.com/netty/netty/commit/39d3ecf8f0c57a7469ba927b2163d4cb4314b138
-bug: https://github.com/netty/netty/security/advisories/GHSA-3p8m-j85q-pgmj
-bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1113994
+Origin: backport, https://github.com/netty/netty/commit/39d3ecf8f0c57a7469ba927b2163d4cb4314b138
+
+[Ubuntu note: This patch uses a new constructor to configure strict line
+parsing (since HttpDecoderConfig.java does not exist), and uses a new field to
+pass strictCRLFCheck from HeaderParser.parse() to HeaderParser.process().
+-- Edwin Jiang <edwin.jiang at canonical.com>]
---
- .../codec/compression/JZlibIntegrationTest.java | 31 ++++++++++++++++++++++
- .../codec/compression/JdkZlibIntegrationTest.java | 31 ++++++++++++++++++++++
- 2 files changed, 62 insertions(+)
- create mode 100644 codec/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java
- create mode 100644 codec/src/test/java/io/netty/handler/codec/compression/JdkZlibIntegrationTest.java
+ codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java | 71 +++-
+ codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java | 9
+ codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java | 9
+ codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkExtensionException.java | 45 +++
+ codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkTerminationException.java | 45 +++
+ codec-http/src/main/java/io/netty/handler/codec/http/InvalidLineSeparatorException.java | 48 +++
+ codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java | 148 ++++++++--
+ codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java | 62 ++++
+ 8 files changed, 403 insertions(+), 34 deletions(-)
+ create mode 100644 codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkExtensionException.java
+ create mode 100644 codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkTerminationException.java
+ create mode 100644 codec-http/src/main/java/io/netty/handler/codec/http/InvalidLineSeparatorException.java
-diff --git a/codec/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java b/codec/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java
-new file mode 100644
-index 0000000..252f134
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpObjectDecoder.java
+@@ -27,6 +27,7 @@
+ import io.netty.handler.codec.TooLongFrameException;
+ import io.netty.util.ByteProcessor;
+ import io.netty.util.internal.AppendableCharSequence;
++import io.netty.util.internal.SystemPropertyUtil;
+
+ import java.util.List;
+
+@@ -104,11 +105,28 @@
+ public abstract class HttpObjectDecoder extends ByteToMessageDecoder {
+ public static final boolean DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS = false;
+ private static final String EMPTY_VALUE = "";
++ public static final boolean DEFAULT_STRICT_LINE_PARSING =
++ SystemPropertyUtil.getBoolean("io.netty.handler.codec.http.defaultStrictLineParsing", true);
++
++ private static final Runnable THROW_INVALID_CHUNK_EXTENSION = new Runnable() {
++ @Override
++ public void run() {
++ throw new InvalidChunkExtensionException();
++ }
++ };
++
++ private static final Runnable THROW_INVALID_LINE_SEPARATOR = new Runnable() {
++ @Override
++ public void run() {
++ throw new InvalidLineSeparatorException();
++ }
++ };
+
+ private final int maxChunkSize;
+ private final boolean chunkedSupported;
+ protected final boolean validateHeaders;
+ private final boolean allowDuplicateContentLengths;
++ private final Runnable defaultStrictCRLFCheck;
+ private final HeaderParser headerParser;
+ private final LineParser lineParser;
+
+@@ -180,6 +198,15 @@
+ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
+ boolean chunkedSupported, boolean validateHeaders, int initialBufferSize,
+ boolean allowDuplicateContentLengths) {
++ this(maxInitialLineLength, maxHeaderSize, maxChunkSize, chunkedSupported,
++ validateHeaders, initialBufferSize, allowDuplicateContentLengths,
++ DEFAULT_STRICT_LINE_PARSING);
++ }
++
++ protected HttpObjectDecoder(
++ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize,
++ boolean chunkedSupported, boolean validateHeaders, int initialBufferSize,
++ boolean allowDuplicateContentLengths, boolean strictLineParsing) {
+ checkPositive(maxInitialLineLength, "maxInitialLineLength");
+ checkPositive(maxHeaderSize, "maxHeaderSize");
+ checkPositive(maxChunkSize, "maxChunkSize");
+@@ -191,6 +218,7 @@
+ this.chunkedSupported = chunkedSupported;
+ this.validateHeaders = validateHeaders;
+ this.allowDuplicateContentLengths = allowDuplicateContentLengths;
++ defaultStrictCRLFCheck = strictLineParsing ? THROW_INVALID_LINE_SEPARATOR : null;
+ }
+
+ @Override
+@@ -203,7 +231,7 @@
+ case SKIP_CONTROL_CHARS:
+ // Fall-through
+ case READ_INITIAL: try {
+- AppendableCharSequence line = lineParser.parse(buffer);
++ AppendableCharSequence line = lineParser.parse(buffer, defaultStrictCRLFCheck);
+ if (line == null) {
+ return;
+ }
+@@ -313,11 +341,11 @@
+ return;
+ }
+ /**
+- * everything else after this point takes care of reading chunked content. basically, read chunk size,
++ * Everything else after this point takes care of reading chunked content. Basically, read chunk size,
+ * read chunk, read and ignore the CRLF and repeat until 0
+ */
+ case READ_CHUNK_SIZE: try {
+- AppendableCharSequence line = lineParser.parse(buffer);
++ AppendableCharSequence line = lineParser.parse(buffer, THROW_INVALID_CHUNK_EXTENSION);
+ if (line == null) {
+ return;
+ }
+@@ -352,16 +380,16 @@
+ // fall-through
+ }
+ case READ_CHUNK_DELIMITER: {
+- final int wIdx = buffer.writerIndex();
+- int rIdx = buffer.readerIndex();
+- while (wIdx > rIdx) {
+- byte next = buffer.getByte(rIdx++);
+- if (next == HttpConstants.LF) {
++ if (buffer.readableBytes() >= 2) {
++ int rIdx = buffer.readerIndex();
++ if (buffer.getByte(rIdx) == HttpConstants.CR &&
++ buffer.getByte(rIdx + 1) == HttpConstants.LF) {
++ buffer.skipBytes(2);
+ currentState = State.READ_CHUNK_SIZE;
+- break;
++ } else {
++ out.add(invalidChunk(buffer, new InvalidChunkTerminationException()));
+ }
+ }
+- buffer.readerIndex(rIdx);
+ return;
+ }
+ case READ_CHUNK_FOOTER: try {
+@@ -560,7 +588,7 @@
+ final HttpMessage message = this.message;
+ final HttpHeaders headers = message.headers();
+
+- AppendableCharSequence line = headerParser.parse(buffer);
++ AppendableCharSequence line = headerParser.parse(buffer, defaultStrictCRLFCheck);
+ if (line == null) {
+ return null;
+ }
+@@ -580,7 +608,7 @@
+ splitHeader(line);
+ }
+
+- line = headerParser.parse(buffer);
++ line = headerParser.parse(buffer, defaultStrictCRLFCheck);
+ if (line == null) {
+ return null;
+ }
+@@ -661,7 +689,7 @@
+ }
+
+ private LastHttpContent readTrailingHeaders(ByteBuf buffer) {
+- AppendableCharSequence line = headerParser.parse(buffer);
++ AppendableCharSequence line = headerParser.parse(buffer, defaultStrictCRLFCheck);
+ if (line == null) {
+ return null;
+ }
+@@ -701,7 +729,7 @@
+ name = null;
+ value = null;
+ }
+- line = headerParser.parse(buffer);
++ line = headerParser.parse(buffer, defaultStrictCRLFCheck);
+ if (line == null) {
+ return null;
+ }
+@@ -865,14 +893,19 @@
+ private final int maxLength;
+ private int size;
+
++ private Runnable strictCRLFCheck;
++
+ HeaderParser(AppendableCharSequence seq, int maxLength) {
+ this.seq = seq;
+ this.maxLength = maxLength;
+ }
+
+- public AppendableCharSequence parse(ByteBuf buffer) {
++ public AppendableCharSequence parse(ByteBuf buffer, Runnable strictCRLFCheck) {
+ final int oldSize = size;
+ seq.reset();
++
++ this.strictCRLFCheck = strictCRLFCheck;
++
+ int i = buffer.forEachByte(this);
+ if (i == -1) {
+ size = oldSize;
+@@ -895,6 +928,10 @@
+ if (len >= 1 && seq.charAtUnsafe(len - 1) == HttpConstants.CR) {
+ -- size;
+ seq.setLength(len - 1);
++ } else {
++ if (strictCRLFCheck != null) {
++ strictCRLFCheck.run();
++ }
+ }
+ return false;
+ }
+@@ -927,9 +964,9 @@
+ }
+
+ @Override
+- public AppendableCharSequence parse(ByteBuf buffer) {
++ public AppendableCharSequence parse(ByteBuf buffer, Runnable strictCRLFCheck) {
+ reset();
+- return super.parse(buffer);
++ return super.parse(buffer, strictCRLFCheck);
+ }
+
+ @Override
--- /dev/null
-+++ b/codec/src/test/java/io/netty/handler/codec/compression/JZlibIntegrationTest.java
-@@ -0,0 +1,31 @@
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkExtensionException.java
+@@ -0,0 +1,45 @@
+/*
-+ * Copyright 2014 The Netty Project
++ * Copyright 2025 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
@@ -56,30 +255,41 @@ index 0000000..252f134
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
-+package io.netty.handler.codec.compression;
++package io.netty.handler.codec.http;
+
-+import io.netty.channel.embedded.EmbeddedChannel;
++import io.netty.handler.codec.CorruptedFrameException;
+
-+public class JZlibIntegrationTest extends AbstractIntegrationTest {
++/**
++ * Thrown when HTTP chunk extensions could not be parsed, typically due to incorrect use of CR LF delimiters.
++ * <p>
++ * <a href="https://datatracker.ietf.org/doc/html/rfc9112#name-chunked-transfer-coding">RFC 9112</a>
++ * specifies that chunk header lines must be terminated in a CR LF pair,
++ * and that a lone LF octet is not allowed within the chunk header line.
++ */
++public final class InvalidChunkExtensionException extends CorruptedFrameException {
++ private static final long serialVersionUID = 536224937231200736L;
+
-+ @Override
-+ protected EmbeddedChannel createEncoder() {
-+ return new EmbeddedChannel(new JZlibEncoder());
++ public InvalidChunkExtensionException() {
++ super("Line Feed must be preceded by Carriage Return when terminating HTTP chunk header lines");
+ }
+
-+ @Override
-+ protected EmbeddedChannel createDecoder() {
-+ return new EmbeddedChannel(new JZlibDecoder(0));
++ public InvalidChunkExtensionException(String message, Throwable cause) {
++ super(message, cause);
++ }
++
++ public InvalidChunkExtensionException(String message) {
++ super(message);
++ }
++
++ public InvalidChunkExtensionException(Throwable cause) {
++ super(cause);
+ }
+}
-diff --git a/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibIntegrationTest.java b/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibIntegrationTest.java
-new file mode 100644
-index 0000000..6dca41d
--- /dev/null
-+++ b/codec/src/test/java/io/netty/handler/codec/compression/JdkZlibIntegrationTest.java
-@@ -0,0 +1,31 @@
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidChunkTerminationException.java
+@@ -0,0 +1,45 @@
+/*
-+ * Copyright 2014 The Netty Project
++ * Copyright 2025 The Netty Project
+ *
+ * The Netty Project licenses this file to you under the Apache License,
+ * version 2.0 (the "License"); you may not use this file except in compliance
@@ -93,19 +303,422 @@ index 0000000..6dca41d
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ */
-+package io.netty.handler.codec.compression;
++package io.netty.handler.codec.http;
++
++import io.netty.handler.codec.CorruptedFrameException;
++
++/**
++ * Thrown when HTTP chunks could not be parsed, typically due to incorrect use of CR LF delimiters.
++ * <p>
++ * <a href="https://datatracker.ietf.org/doc/html/rfc9112#name-chunked-transfer-coding">RFC 9112</a>
++ * specifies that chunk bodies must be terminated in a CR LF pair,
++ * and that the delimiter must follow the given chunk-size number of octets in chunk-data.
++ */
++public final class InvalidChunkTerminationException extends CorruptedFrameException {
++ private static final long serialVersionUID = 536224937231200736L;
+
-+import io.netty.channel.embedded.EmbeddedChannel;
++ public InvalidChunkTerminationException() {
++ super("Chunk data sections must be terminated by a CR LF octet pair");
++ }
+
-+public class JdkZlibIntegrationTest extends AbstractIntegrationTest {
++ public InvalidChunkTerminationException(String message, Throwable cause) {
++ super(message, cause);
++ }
+
-+ @Override
-+ protected EmbeddedChannel createEncoder() {
-+ return new EmbeddedChannel(new JdkZlibEncoder());
++ public InvalidChunkTerminationException(String message) {
++ super(message);
+ }
+
-+ @Override
-+ protected EmbeddedChannel createDecoder() {
-+ return new EmbeddedChannel(new JdkZlibDecoder(0));
++ public InvalidChunkTerminationException(Throwable cause) {
++ super(cause);
+ }
+}
+--- /dev/null
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/InvalidLineSeparatorException.java
+@@ -0,0 +1,48 @@
++/*
++ * Copyright 2025 The Netty Project
++ *
++ * The Netty Project licenses this file to you under the Apache License,
++ * version 2.0 (the "License"); you may not use this file except in compliance
++ * with the License. You may obtain a copy of the License at:
++ *
++ * https://www.apache.org/licenses/LICENSE-2.0
++ *
++ * Unless required by applicable law or agreed to in writing, software
++ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
++ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
++ * License for the specific language governing permissions and limitations
++ * under the License.
++ */
++package io.netty.handler.codec.http;
++
++import io.netty.handler.codec.DecoderException;
++
++/**
++ * Thrown when strict line parsing is enabled,
++ * and HTTP start- and header field-lines are not separated by CR LF octet pairs.
++ * <p>
++ * Strict line parsing is enabled by default since Netty 4.1.124 and 4.2.4.
++ * This default can be overridden by setting the {@code io.netty.handler.codec.http.defaultStrictLineParsing}
++ * system property to {@code false}.
++ * <p>
++ * See <a href="https://datatracker.ietf.org/doc/html/rfc9112#name-message-format">RFC 9112 Section 2.1</a>.
++ */
++public final class InvalidLineSeparatorException extends DecoderException {
++ private static final long serialVersionUID = 536224937231200736L;
++
++ public InvalidLineSeparatorException() {
++ super("Line Feed must be preceded by Carriage Return when terminating HTTP start- and header field-lines");
++ }
++
++ public InvalidLineSeparatorException(String message, Throwable cause) {
++ super(message, cause);
++ }
++
++ public InvalidLineSeparatorException(String message) {
++ super(message);
++ }
++
++ public InvalidLineSeparatorException(Throwable cause) {
++ super(cause);
++ }
++}
+--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpRequestDecoderTest.java
+@@ -19,6 +19,7 @@
+ import io.netty.buffer.Unpooled;
+ import io.netty.channel.embedded.EmbeddedChannel;
+ import io.netty.handler.codec.TooLongFrameException;
++import io.netty.handler.codec.DecoderResult;
+ import io.netty.util.AsciiString;
+ import io.netty.util.CharsetUtil;
+ import org.junit.Test;
+@@ -43,6 +44,12 @@
+ private static final byte[] CONTENT_MIXED_DELIMITERS = createContent("\r\n", "\n");
+ private static final int CONTENT_LENGTH = 8;
+
++ private static final int DEFAULT_MAX_INITIAL_LINE_LENGTH = 4096;
++ private static final int DEFAULT_MAX_HEADER_SIZE = 8192;
++ private static final int DEFAULT_MAX_CHUNK_SIZE = 8192;
++ private static final boolean DEFAULT_VALIDATE_HEADERS = true;
++ private static final int DEFAULT_INITIAL_BUFFER_SIZE = 128;
++
+ private static byte[] createContent(String... lineDelimiters) {
+ String lineDelimiter;
+ String lineDelimiter2;
+@@ -80,18 +87,45 @@
+ testDecodeWholeRequestAtOnce(CONTENT_MIXED_DELIMITERS);
+ }
+
++ @Test
++ public void testDecodeWholeRequestAtOnceFailsWithLFDelimiters() {
++ testDecodeWholeRequestAtOnce(CONTENT_LF_DELIMITERS, DEFAULT_MAX_HEADER_SIZE, true, true);
++ }
++
++ @Test
++ public void testDecodeWholeRequestAtOnceFailsWithMixedDelimiters() {
++ testDecodeWholeRequestAtOnce(CONTENT_MIXED_DELIMITERS, DEFAULT_MAX_HEADER_SIZE, true, true);
++ }
++
+ private static void testDecodeWholeRequestAtOnce(byte[] content) {
+- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
++ testDecodeWholeRequestAtOnce(content, DEFAULT_MAX_HEADER_SIZE, false, false);
++ }
++
++ private static void testDecodeWholeRequestAtOnce(byte[] content, int maxHeaderSize, boolean strictLineParsing,
++ boolean expectFailure) {
++ EmbeddedChannel channel =
++ new EmbeddedChannel(new HttpRequestDecoder(DEFAULT_MAX_INITIAL_LINE_LENGTH,
++ maxHeaderSize,
++ DEFAULT_MAX_CHUNK_SIZE,
++ DEFAULT_VALIDATE_HEADERS,
++ DEFAULT_INITIAL_BUFFER_SIZE,
++ strictLineParsing));
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(content)));
+ HttpRequest req = channel.readInbound();
+ assertNotNull(req);
+- checkHeaders(req.headers());
+- LastHttpContent c = channel.readInbound();
+- assertEquals(CONTENT_LENGTH, c.content().readableBytes());
+- assertEquals(
+- Unpooled.wrappedBuffer(content, content.length - CONTENT_LENGTH, CONTENT_LENGTH),
+- c.content().readSlice(CONTENT_LENGTH));
+- c.release();
++ if (expectFailure) {
++ assertTrue(req.decoderResult().isFailure());
++ assertThat(req.decoderResult().cause(), instanceOf(InvalidLineSeparatorException.class));
++ } else {
++ assertFalse(req.decoderResult().isFailure());
++ checkHeaders(req.headers());
++ LastHttpContent c = channel.readInbound();
++ assertEquals(CONTENT_LENGTH, c.content().readableBytes());
++ assertEquals(
++ Unpooled.wrappedBuffer(content, content.length - CONTENT_LENGTH, CONTENT_LENGTH),
++ c.content().readSlice(CONTENT_LENGTH));
++ c.release();
++ }
+
+ assertFalse(channel.finish());
+ assertNull(channel.readInbound());
+@@ -116,27 +150,45 @@
+
+ @Test
+ public void testDecodeWholeRequestInMultipleStepsCRLFDelimiters() {
+- testDecodeWholeRequestInMultipleSteps(CONTENT_CRLF_DELIMITERS);
++ testDecodeWholeRequestInMultipleSteps(CONTENT_CRLF_DELIMITERS, true, false);
+ }
+
+ @Test
+ public void testDecodeWholeRequestInMultipleStepsLFDelimiters() {
+- testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS);
++ testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS, false, false);
+ }
+
+ @Test
+ public void testDecodeWholeRequestInMultipleStepsMixedDelimiters() {
+- testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS);
++ testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS, false, false);
+ }
+
+- private static void testDecodeWholeRequestInMultipleSteps(byte[] content) {
++ @Test
++ public void testDecodeWholeRequestInMultipleStepsFailsWithLFDelimiters() {
++ testDecodeWholeRequestInMultipleSteps(CONTENT_LF_DELIMITERS, true, true);
++ }
++
++ @Test
++ public void testDecodeWholeRequestInMultipleStepsFailsWithMixedDelimiters() {
++ testDecodeWholeRequestInMultipleSteps(CONTENT_MIXED_DELIMITERS, true, true);
++ }
++
++ private static void testDecodeWholeRequestInMultipleSteps(
++ byte[] content, boolean strictLineParsing, boolean expectFailure) {
+ for (int i = 1; i < content.length; i++) {
+- testDecodeWholeRequestInMultipleSteps(content, i);
++ testDecodeWholeRequestInMultipleSteps(content, i, strictLineParsing, expectFailure);
+ }
+ }
+
+- private static void testDecodeWholeRequestInMultipleSteps(byte[] content, int fragmentSize) {
+- EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
++ private static void testDecodeWholeRequestInMultipleSteps(
++ byte[] content, int fragmentSize, boolean strictLineParsing, boolean expectFailure) {
++ EmbeddedChannel channel =
++ new EmbeddedChannel(new HttpRequestDecoder(DEFAULT_MAX_INITIAL_LINE_LENGTH,
++ DEFAULT_MAX_HEADER_SIZE,
++ DEFAULT_MAX_CHUNK_SIZE,
++ DEFAULT_VALIDATE_HEADERS,
++ DEFAULT_INITIAL_BUFFER_SIZE,
++ strictLineParsing));
+ int headerLength = content.length - CONTENT_LENGTH;
+
+ // split up the header
+@@ -158,6 +210,12 @@
+
+ HttpRequest req = channel.readInbound();
+ assertNotNull(req);
++ if (expectFailure) {
++ assertTrue(req.decoderResult().isFailure());
++ assertThat(req.decoderResult().cause(), instanceOf(InvalidLineSeparatorException.class));
++ return; // No more messages will be produced.
++ }
++ assertFalse(req.decoderResult().isFailure());
+ checkHeaders(req.headers());
+
+ for (int i = CONTENT_LENGTH; i > 1; i --) {
+@@ -531,6 +589,66 @@
+ }
+
+ @Test
++ public void mustRejectImproperlyTerminatedChunkExtensions() throws Exception {
++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html
++ String requestStr = "GET /one HTTP/1.1\r\n" +
++ "Host: localhost\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "2;\n" + // Chunk size followed by illegal single newline (not preceded by carriage return)
++ "xx\r\n" +
++ "45\r\n" +
++ "0\r\n\r\n" +
++ "GET /two HTTP/1.1\r\n" +
++ "Host: localhost\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "0\r\n\r\n";
++ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
++ HttpRequest request = channel.readInbound();
++ assertFalse(request.decoderResult().isFailure()); // We parse the headers just fine.
++ assertTrue(request.headers().names().contains("Transfer-Encoding"));
++ assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false));
++ HttpContent content = channel.readInbound();
++ DecoderResult decoderResult = content.decoderResult();
++ assertTrue(decoderResult.isFailure()); // But parsing the chunk must fail.
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkExtensionException.class));
++ content.release();
++ assertFalse(channel.finish());
++ }
++
++ @Test
++ public void mustRejectImproperlyTerminatedChunkBodies() throws Exception {
++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html
++ String requestStr = "GET /one HTTP/1.1\r\n" +
++ "Host: localhost\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "5\r\n" +
++ "AAAAAXX" + // Chunk body contains extra (XX) bytes, and no CRLF terminator.
++ "45\r\n" +
++ "0\r\n" +
++ "GET /two HTTP/1.1\r\n" +
++ "Host: localhost\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "0\r\n\r\n";
++ EmbeddedChannel channel = new EmbeddedChannel(new HttpRequestDecoder());
++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
++ HttpRequest request = channel.readInbound();
++ assertFalse(request.decoderResult().isFailure()); // We parse the headers just fine.
++ assertTrue(request.headers().names().contains("Transfer-Encoding"));
++ assertTrue(request.headers().contains("Transfer-Encoding", "chunked", false));
++ HttpContent content = channel.readInbound();
++ assertFalse(content.decoderResult().isFailure()); // We parse the content promised by the chunk length.
++ content.release();
++
++ content = channel.readInbound();
++ DecoderResult decoderResult = content.decoderResult();
++ assertTrue(decoderResult.isFailure()); // But then parsing the chunk delimiter must fail.
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkTerminationException.class));
++ content.release();
++ assertFalse(channel.finish());
++ }
++
++ @Test
+ public void testContentLengthHeaderAndChunked() {
+ String requestStr = "POST / HTTP/1.1\r\n" +
+ "Host: example.com\r\n" +
+--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java
++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpResponseDecoderTest.java
+@@ -18,6 +18,7 @@
+ import io.netty.buffer.ByteBuf;
+ import io.netty.buffer.Unpooled;
+ import io.netty.channel.embedded.EmbeddedChannel;
++import io.netty.handler.codec.DecoderResult;
+ import io.netty.handler.codec.PrematureChannelClosureException;
+ import io.netty.handler.codec.TooLongFrameException;
+ import io.netty.util.CharsetUtil;
+@@ -672,6 +673,63 @@
+ }
+
+ @Test
++ public void mustRejectImproperlyTerminatedChunkExtensions() throws Exception {
++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html
++ String requestStr = "HTTP/1.1 200 OK\r\n" +
++ "Transfer-Encoding: chunked\r\n" +
++ "\r\n" +
++ "2;\n" + // Chunk size followed by illegal single newline (not preceded by carriage return)
++ "xx\r\n" +
++ "1D\r\n" +
++ "0\r\n\r\n" +
++ "HTTP/1.1 200 OK\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "0\r\n\r\n";
++ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder());
++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
++ HttpResponse response = channel.readInbound();
++ assertFalse(response.decoderResult().isFailure()); // We parse the headers just fine.
++ assertTrue(response.headers().names().contains("Transfer-Encoding"));
++ assertTrue(response.headers().contains("Transfer-Encoding", "chunked", false));
++ HttpContent content = channel.readInbound();
++ DecoderResult decoderResult = content.decoderResult();
++ assertTrue(decoderResult.isFailure()); // But parsing the chunk must fail.
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkExtensionException.class));
++ content.release();
++ assertFalse(channel.finish());
++ }
++
++ @Test
++ public void mustRejectImproperlyTerminatedChunkBodies() throws Exception {
++ // See full explanation: https://w4ke.info/2025/06/18/funky-chunks.html
++ String requestStr = "HTTP/1.1 200 OK\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "5\r\n" +
++ "AAAAXX" + // Chunk body contains extra (XX) bytes, and no CRLF terminator.
++ "1D\r\n" +
++ "0\r\n" +
++ "HTTP/1.1 200 OK\r\n" +
++ "Transfer-Encoding: chunked\r\n\r\n" +
++ "0\r\n\r\n";
++ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder());
++ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
++ HttpResponse response = channel.readInbound();
++ assertFalse(response.decoderResult().isFailure()); // We parse the headers just fine.
++ assertTrue(response.headers().names().contains("Transfer-Encoding"));
++ assertTrue(response.headers().contains("Transfer-Encoding", "chunked", false));
++ HttpContent content = channel.readInbound();
++ assertFalse(content.decoderResult().isFailure()); // We parse the content promised by the chunk length.
++ content.release();
++
++ content = channel.readInbound();
++ DecoderResult decoderResult = content.decoderResult();
++ assertTrue(decoderResult.isFailure()); // But then parsing the chunk delimiter must fail.
++ assertThat(decoderResult.cause(), instanceOf(InvalidChunkTerminationException.class));
++ content.release();
++ assertFalse(channel.finish());
++ }
++
++ @Test
+ public void testConnectionClosedBeforeHeadersReceived() {
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder());
+ String responseInitialLine =
+@@ -718,7 +776,7 @@
+ EmbeddedChannel channel = new EmbeddedChannel(new HttpResponseDecoder());
+ String requestStr = "HTTP/1.1 200 OK\r\n" +
+ "Transfer-Encoding : chunked\r\n" +
+- "Host: netty.io\n\r\n";
++ "Host: netty.io\r\n\r\n";
+
+ assertTrue(channel.writeInbound(Unpooled.copiedBuffer(requestStr, CharsetUtil.US_ASCII)));
+ HttpResponse response = channel.readInbound();
+@@ -787,7 +845,7 @@
+ testHeaderNameEndsWithControlChar(0x0c);
+ }
+
+- private void testHeaderNameEndsWithControlChar(int controlChar) {
++ private static void testHeaderNameEndsWithControlChar(int controlChar) {
+ ByteBuf responseBuffer = Unpooled.buffer();
+ responseBuffer.writeCharSequence("HTTP/1.1 200 OK\r\n" +
+ "Host: netty.io\r\n", CharsetUtil.US_ASCII);
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpRequestDecoder.java
+@@ -81,6 +81,15 @@
+ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize);
+ }
+
++ public HttpRequestDecoder(
++ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
++ int initialBufferSize, boolean strictLineParsing) {
++ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true,
++ validateHeaders, initialBufferSize,
++ HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS,
++ strictLineParsing);
++ }
++
+ @Override
+ protected HttpMessage createMessage(String[] initialLine) throws Exception {
+ return new DefaultHttpRequest(
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpResponseDecoder.java
+@@ -112,6 +112,15 @@
+ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true, validateHeaders, initialBufferSize);
+ }
+
++ public HttpResponseDecoder(
++ int maxInitialLineLength, int maxHeaderSize, int maxChunkSize, boolean validateHeaders,
++ int initialBufferSize, boolean strictLineParsing) {
++ super(maxInitialLineLength, maxHeaderSize, maxChunkSize, true,
++ validateHeaders, initialBufferSize,
++ HttpObjectDecoder.DEFAULT_ALLOW_DUPLICATE_CONTENT_LENGTHS,
++ strictLineParsing);
++ }
++
+ @Override
+ protected HttpMessage createMessage(String[] initialLine) {
+ return new DefaultHttpResponse(
=====================================
debian/patches/CVE-2025-58057.patch
=====================================
@@ -0,0 +1,930 @@
+From: Norman Maurer <norman_maurer at apple.com>
+Date: Wed, 3 Sep 2025 10:35:05 +0200
+Subject: [PATCH] Merge commit from fork (#15612)
+
+Motivation:
+
+We should ensure our decompressing decoders will fire their buffers
+through the pipeline as fast as possible and so allow the user to take
+ownership of these as fast as possible. This is needed to reduce the
+risk of OOME as otherwise a small input might produce a large amount of
+data that can't be processed until all the data was decompressed in a
+loop. Besides this we also should ensure that other handlers that use
+these decompressors will not buffer all of the produced data before
+processing it, which was true for HTTP and HTTP2.
+
+Modifications:
+
+- Adjust affected decoders (Brotli, Zstd and ZLib) to fire buffers
+through the pipeline as soon as possible
+- Adjust HTTP / HTTP2 decompressors to do the same
+- Add testcase.
+
+Result:
+
+Less risk of OOME when decompressing
+
+Co-authored-by: yawkat <jonas.konrad at oracle.com>
+
+origin: backport, https://github.com/netty/netty/commit/34894ac73b02efefeacd9c0972780b32dc3de04f
+---
+ .../handler/codec/http/HttpContentDecoder.java | 239 +++++++++++----------
+ .../codec/http/HttpContentDecompressorTest.java | 88 ++++++++
+ .../http2/DelegatingDecompressorFrameListener.java | 177 +++++++--------
+ .../handler/codec/compression/JZlibDecoder.java | 32 ++-
+ .../handler/codec/compression/JdkZlibDecoder.java | 45 ++--
+ .../codec/compression/AbstractIntegrationTest.java | 62 ++++++
+ 6 files changed, 416 insertions(+), 227 deletions(-)
+
+diff --git a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java
+index d2513e4..3c43900 100644
+--- a/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java
++++ b/codec-http/src/main/java/io/netty/handler/codec/http/HttpContentDecoder.java
+@@ -17,6 +17,7 @@ package io.netty.handler.codec.http;
+
+ import io.netty.buffer.ByteBuf;
+ import io.netty.channel.ChannelHandlerContext;
++import io.netty.channel.ChannelInboundHandlerAdapter;
+ import io.netty.channel.embedded.EmbeddedChannel;
+ import io.netty.handler.codec.CodecException;
+ import io.netty.handler.codec.DecoderResult;
+@@ -52,125 +53,136 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<HttpObj
+ private EmbeddedChannel decoder;
+ private boolean continueResponse;
+ private boolean needRead = true;
++ private ByteBufForwarder forwarder;
+
+ @Override
+ protected void decode(ChannelHandlerContext ctx, HttpObject msg, List<Object> out) throws Exception {
+- try {
+- if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) {
++ needRead = true;
++ if (msg instanceof HttpResponse && ((HttpResponse) msg).status().code() == 100) {
+
+- if (!(msg instanceof LastHttpContent)) {
+- continueResponse = true;
+- }
+- // 100-continue response must be passed through.
+- out.add(ReferenceCountUtil.retain(msg));
+- return;
++ if (!(msg instanceof LastHttpContent)) {
++ continueResponse = true;
+ }
++ // 100-continue response must be passed through.
++ needRead = false;
++ ctx.fireChannelRead(ReferenceCountUtil.retain(msg));
++ return;
++ }
+
+- if (continueResponse) {
+- if (msg instanceof LastHttpContent) {
+- continueResponse = false;
+- }
+- // 100-continue response must be passed through.
+- out.add(ReferenceCountUtil.retain(msg));
+- return;
++ if (continueResponse) {
++ if (msg instanceof LastHttpContent) {
++ continueResponse = false;
+ }
++ needRead = false;
++ ctx.fireChannelRead(ReferenceCountUtil.retain(msg));
++ return;
++ }
+
+- if (msg instanceof HttpMessage) {
+- cleanup();
+- final HttpMessage message = (HttpMessage) msg;
+- final HttpHeaders headers = message.headers();
++ if (msg instanceof HttpMessage) {
++ cleanup();
++ final HttpMessage message = (HttpMessage) msg;
++ final HttpHeaders headers = message.headers();
+
+- // Determine the content encoding.
+- String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING);
+- if (contentEncoding != null) {
+- contentEncoding = contentEncoding.trim();
++ // Determine the content encoding.
++ String contentEncoding = headers.get(HttpHeaderNames.CONTENT_ENCODING);
++ if (contentEncoding != null) {
++ contentEncoding = contentEncoding.trim();
++ } else {
++ String transferEncoding = headers.get(HttpHeaderNames.TRANSFER_ENCODING);
++ if (transferEncoding != null) {
++ int idx = transferEncoding.indexOf(",");
++ if (idx != -1) {
++ contentEncoding = transferEncoding.substring(0, idx).trim();
++ } else {
++ contentEncoding = transferEncoding.trim();
++ }
+ } else {
+ contentEncoding = IDENTITY;
+ }
+- decoder = newContentDecoder(contentEncoding);
+-
+- if (decoder == null) {
+- if (message instanceof HttpContent) {
+- ((HttpContent) message).retain();
+- }
+- out.add(message);
+- return;
+- }
++ }
++ decoder = newContentDecoder(contentEncoding);
+
+- // Remove content-length header:
+- // the correct value can be set only after all chunks are processed/decoded.
+- // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header.
+- // Otherwise, rely on LastHttpContent message.
+- if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
+- headers.remove(HttpHeaderNames.CONTENT_LENGTH);
+- headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
++ if (decoder == null) {
++ if (message instanceof HttpContent) {
++ ((HttpContent) message).retain();
+ }
+- // Either it is already chunked or EOF terminated.
+- // See https://github.com/netty/netty/issues/5892
++ needRead = false;
++ ctx.fireChannelRead(message);
++ return;
++ }
++ decoder.pipeline().addLast(forwarder);
+
+- // set new content encoding,
+- CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding);
+- if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) {
+- // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity'
+- // as per: http://tools.ietf.org/html/rfc2616#section-14.11
+- headers.remove(HttpHeaderNames.CONTENT_ENCODING);
+- } else {
+- headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding);
+- }
++ // Remove content-length header:
++ // the correct value can be set only after all chunks are processed/decoded.
++ // If buffering is not an issue, add HttpObjectAggregator down the chain, it will set the header.
++ // Otherwise, rely on LastHttpContent message.
++ if (headers.contains(HttpHeaderNames.CONTENT_LENGTH)) {
++ headers.remove(HttpHeaderNames.CONTENT_LENGTH);
++ headers.set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
++ }
++ // Either it is already chunked or EOF terminated.
++ // See https://github.com/netty/netty/issues/5892
+
+- if (message instanceof HttpContent) {
+- // If message is a full request or response object (headers + data), don't copy data part into out.
+- // Output headers only; data part will be decoded below.
+- // Note: "copy" object must not be an instance of LastHttpContent class,
+- // as this would (erroneously) indicate the end of the HttpMessage to other handlers.
+- HttpMessage copy;
+- if (message instanceof HttpRequest) {
+- HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest
+- copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri());
+- } else if (message instanceof HttpResponse) {
+- HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse
+- copy = new DefaultHttpResponse(r.protocolVersion(), r.status());
+- } else {
+- throw new CodecException("Object of class " + message.getClass().getName() +
+- " is not an HttpRequest or HttpResponse");
+- }
+- copy.headers().set(message.headers());
+- copy.setDecoderResult(message.decoderResult());
+- out.add(copy);
+- } else {
+- out.add(message);
+- }
++ // set new content encoding,
++ CharSequence targetContentEncoding = getTargetContentEncoding(contentEncoding);
++ if (HttpHeaderValues.IDENTITY.contentEquals(targetContentEncoding)) {
++ // Do NOT set the 'Content-Encoding' header if the target encoding is 'identity'
++ // as per: https://tools.ietf.org/html/rfc2616#section-14.11
++ headers.remove(HttpHeaderNames.CONTENT_ENCODING);
++ } else {
++ headers.set(HttpHeaderNames.CONTENT_ENCODING, targetContentEncoding);
+ }
+
+- if (msg instanceof HttpContent) {
+- final HttpContent c = (HttpContent) msg;
+- if (decoder == null) {
+- out.add(c.retain());
++ if (message instanceof HttpContent) {
++ // If message is a full request or response object (headers + data), don't copy data part into out.
++ // Output headers only; data part will be decoded below.
++ // Note: "copy" object must not be an instance of LastHttpContent class,
++ // as this would (erroneously) indicate the end of the HttpMessage to other handlers.
++ HttpMessage copy;
++ if (message instanceof HttpRequest) {
++ HttpRequest r = (HttpRequest) message; // HttpRequest or FullHttpRequest
++ copy = new DefaultHttpRequest(r.protocolVersion(), r.method(), r.uri());
++ } else if (message instanceof HttpResponse) {
++ HttpResponse r = (HttpResponse) message; // HttpResponse or FullHttpResponse
++ copy = new DefaultHttpResponse(r.protocolVersion(), r.status());
+ } else {
+- decodeContent(c, out);
++ throw new CodecException("Object of class " + message.getClass().getName() +
++ " is not an HttpRequest or HttpResponse");
+ }
++ copy.headers().set(message.headers());
++ copy.setDecoderResult(message.decoderResult());
++ needRead = false;
++ ctx.fireChannelRead(copy);
++ } else {
++ needRead = false;
++ ctx.fireChannelRead(message);
+ }
+- } finally {
+- needRead = out.isEmpty();
+ }
+- }
+-
+- private void decodeContent(HttpContent c, List<Object> out) {
+- ByteBuf content = c.content();
+-
+- decode(content, out);
+-
+- if (c instanceof LastHttpContent) {
+- finishDecode(out);
+
+- LastHttpContent last = (LastHttpContent) c;
+- // Generate an additional chunk if the decoder produced
+- // the last product on closure,
+- HttpHeaders headers = last.trailingHeaders();
+- if (headers.isEmpty()) {
+- out.add(LastHttpContent.EMPTY_LAST_CONTENT);
++ if (msg instanceof HttpContent) {
++ final HttpContent c = (HttpContent) msg;
++ if (decoder == null) {
++ needRead = false;
++ ctx.fireChannelRead(c.retain());
+ } else {
+- out.add(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS));
++ // call retain here as it will call release after its written to the channel
++ decoder.writeInbound(c.content().retain());
++
++ if (c instanceof LastHttpContent) {
++ boolean notEmpty = decoder.finish();
++ decoder = null;
++ assert !notEmpty;
++ LastHttpContent last = (LastHttpContent) c;
++ // Generate an additional chunk if the decoder produced
++ // the last product on closure,
++ HttpHeaders headers = last.trailingHeaders();
++ needRead = false;
++ if (headers.isEmpty()) {
++ ctx.fireChannelRead(LastHttpContent.EMPTY_LAST_CONTENT);
++ } else {
++ ctx.fireChannelRead(new ComposedLastHttpContent(headers, DecoderResult.SUCCESS));
++ }
++ }
+ }
+ }
+ }
+@@ -228,6 +240,7 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<HttpObj
+ @Override
+ public void handlerAdded(ChannelHandlerContext ctx) throws Exception {
+ this.ctx = ctx;
++ forwarder = new ByteBufForwarder(ctx);
+ super.handlerAdded(ctx);
+ }
+
+@@ -249,30 +262,30 @@ public abstract class HttpContentDecoder extends MessageToMessageDecoder<HttpObj
+ }
+ }
+
+- private void decode(ByteBuf in, List<Object> out) {
+- // call retain here as it will call release after its written to the channel
+- decoder.writeInbound(in.retain());
+- fetchDecoderOutput(out);
+- }
++ private final class ByteBufForwarder extends ChannelInboundHandlerAdapter {
++
++ private final ChannelHandlerContext targetCtx;
+
+- private void finishDecode(List<Object> out) {
+- if (decoder.finish()) {
+- fetchDecoderOutput(out);
++ ByteBufForwarder(ChannelHandlerContext targetCtx) {
++ this.targetCtx = targetCtx;
+ }
+- decoder = null;
+- }
+
+- private void fetchDecoderOutput(List<Object> out) {
+- for (;;) {
+- ByteBuf buf = decoder.readInbound();
+- if (buf == null) {
+- break;
+- }
++ @Override
++ public boolean isSharable() {
++ // We need to mark the handler as sharable as we will add it to every EmbeddedChannel that is
++ // generated.
++ return true;
++ }
++
++ @Override
++ public void channelRead(ChannelHandlerContext ctx, Object msg) {
++ ByteBuf buf = (ByteBuf) msg;
+ if (!buf.isReadable()) {
+ buf.release();
+- continue;
++ return;
+ }
+- out.add(new DefaultHttpContent(buf));
++ needRead = false;
++ targetCtx.fireChannelRead(new DefaultHttpContent(buf));
+ }
+ }
+ }
+diff --git a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java
+index 4a659fa..f54e98d 100644
+--- a/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java
++++ b/codec-http/src/test/java/io/netty/handler/codec/http/HttpContentDecompressorTest.java
+@@ -15,6 +15,8 @@
+ */
+ package io.netty.handler.codec.http;
+
++import io.netty.buffer.ByteBuf;
++import io.netty.buffer.PooledByteBufAllocator;
+ import io.netty.buffer.Unpooled;
+ import io.netty.channel.ChannelHandlerContext;
+ import io.netty.channel.ChannelInboundHandlerAdapter;
+@@ -23,6 +25,8 @@
+ import org.junit.Assert;
+ import org.junit.Test;
+
++import java.util.ArrayList;
++import java.util.List;
+ import java.util.concurrent.atomic.AtomicInteger;
+
+ public class HttpContentDecompressorTest {
+@@ -67,4 +71,92 @@
+ Assert.assertEquals(2, readCalled.get());
+ Assert.assertFalse(channel.finishAndReleaseAll());
+ }
++
++ @Test
++ public void testZipBombGzip() {
++ testZipBomb("gzip");
++ }
++
++ @Test
++ public void testZipBombDeflate() {
++ testZipBomb("deflate");
++ }
++
++ @Test
++ public void testZipBombSnappy() {
++ testZipBomb("snappy");
++ }
++
++ private static void testZipBomb(String encoding) {
++ int chunkSize = 1024 * 1024;
++ int numberOfChunks = 256;
++ int memoryLimit = chunkSize * 128;
++
++ EmbeddedChannel compressionChannel = new EmbeddedChannel(new HttpContentCompressor());
++ DefaultFullHttpRequest req = new DefaultFullHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.GET, "/");
++ req.headers().set(HttpHeaderNames.ACCEPT_ENCODING, encoding);
++ compressionChannel.writeInbound(req);
++
++ DefaultHttpResponse response = new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK);
++ response.headers().set(HttpHeaderNames.TRANSFER_ENCODING, HttpHeaderValues.CHUNKED);
++ compressionChannel.writeOutbound(response);
++
++ for (int i = 0; i < numberOfChunks; i++) {
++ ByteBuf buffer = compressionChannel.alloc().buffer(chunkSize);
++ buffer.writeZero(chunkSize);
++ compressionChannel.writeOutbound(new DefaultHttpContent(buffer));
++ }
++ compressionChannel.writeOutbound(LastHttpContent.EMPTY_LAST_CONTENT);
++ compressionChannel.finish();
++ compressionChannel.releaseInbound();
++
++ ByteBuf compressed = compressionChannel.alloc().buffer();
++ HttpMessage message = null;
++ while (true) {
++ HttpObject obj = compressionChannel.readOutbound();
++ if (obj == null) {
++ break;
++ }
++ if (obj instanceof HttpMessage) {
++ message = (HttpMessage) obj;
++ }
++ if (obj instanceof HttpContent) {
++ HttpContent content = (HttpContent) obj;
++ compressed.writeBytes(content.content());
++ content.release();
++ }
++ }
++
++ PooledByteBufAllocator allocator = new PooledByteBufAllocator(false);
++
++ ZipBombIncomingHandler incomingHandler = new ZipBombIncomingHandler(memoryLimit);
++ EmbeddedChannel decompressChannel = new EmbeddedChannel(new HttpContentDecompressor(), incomingHandler);
++ decompressChannel.config().setAllocator(allocator);
++ decompressChannel.writeInbound(message);
++ decompressChannel.writeInbound(new DefaultLastHttpContent(compressed));
++
++ Assert.assertEquals((long) chunkSize * numberOfChunks, incomingHandler.total);
++ }
++
++ private static final class ZipBombIncomingHandler extends ChannelInboundHandlerAdapter {
++ final int memoryLimit;
++ long total;
++
++ ZipBombIncomingHandler(int memoryLimit) {
++ this.memoryLimit = memoryLimit;
++ }
++
++ @Override
++ public void channelRead(ChannelHandlerContext ctx, Object msg) {
++ PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc();
++ Assert.assertTrue(allocator.metric().usedHeapMemory() < memoryLimit);
++ Assert.assertTrue(allocator.metric().usedDirectMemory() < memoryLimit);
++
++ if (msg instanceof HttpContent) {
++ HttpContent buf = (HttpContent) msg;
++ total += buf.content().readableBytes();
++ buf.release();
++ }
++ }
++ }
+ }
+diff --git a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java
+index 6793f28..af15318 100644
+--- a/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java
++++ b/codec-http2/src/main/java/io/netty/handler/codec/http2/DelegatingDecompressorFrameListener.java
+@@ -17,6 +17,7 @@ package io.netty.handler.codec.http2;
+ import io.netty.buffer.ByteBuf;
+ import io.netty.buffer.Unpooled;
+ import io.netty.channel.ChannelHandlerContext;
++import io.netty.channel.ChannelInboundHandlerAdapter;
+ import io.netty.channel.embedded.EmbeddedChannel;
+ import io.netty.handler.codec.ByteToMessageDecoder;
+ import io.netty.handler.codec.compression.ZlibCodecFactory;
+@@ -63,7 +64,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ public void onStreamRemoved(Http2Stream stream) {
+ final Http2Decompressor decompressor = decompressor(stream);
+ if (decompressor != null) {
+- cleanup(decompressor);
++ decompressor.cleanup();
+ }
+ }
+ });
+@@ -78,66 +79,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ // The decompressor may be null if no compatible encoding type was found in this stream's headers
+ return listener.onDataRead(ctx, streamId, data, padding, endOfStream);
+ }
+-
+- final EmbeddedChannel channel = decompressor.decompressor();
+- final int compressedBytes = data.readableBytes() + padding;
+- decompressor.incrementCompressedBytes(compressedBytes);
+- try {
+- // call retain here as it will call release after its written to the channel
+- channel.writeInbound(data.retain());
+- ByteBuf buf = nextReadableBuf(channel);
+- if (buf == null && endOfStream && channel.finish()) {
+- buf = nextReadableBuf(channel);
+- }
+- if (buf == null) {
+- if (endOfStream) {
+- listener.onDataRead(ctx, streamId, Unpooled.EMPTY_BUFFER, padding, true);
+- }
+- // No new decompressed data was extracted from the compressed data. This means the application could
+- // not be provided with data and thus could not return how many bytes were processed. We will assume
+- // there is more data coming which will complete the decompression block. To allow for more data we
+- // return all bytes to the flow control window (so the peer can send more data).
+- decompressor.incrementDecompressedBytes(compressedBytes);
+- return compressedBytes;
+- }
+- try {
+- Http2LocalFlowController flowController = connection.local().flowController();
+- decompressor.incrementDecompressedBytes(padding);
+- for (;;) {
+- ByteBuf nextBuf = nextReadableBuf(channel);
+- boolean decompressedEndOfStream = nextBuf == null && endOfStream;
+- if (decompressedEndOfStream && channel.finish()) {
+- nextBuf = nextReadableBuf(channel);
+- decompressedEndOfStream = nextBuf == null;
+- }
+-
+- decompressor.incrementDecompressedBytes(buf.readableBytes());
+- // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert
+- // from the decompressed amount which the user knows about to the compressed amount which flow
+- // control knows about.
+- flowController.consumeBytes(stream,
+- listener.onDataRead(ctx, streamId, buf, padding, decompressedEndOfStream));
+- if (nextBuf == null) {
+- break;
+- }
+-
+- padding = 0; // Padding is only communicated once on the first iteration.
+- buf.release();
+- buf = nextBuf;
+- }
+- // We consume bytes each time we call the listener to ensure if multiple frames are decompressed
+- // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of
+- // flow control.
+- return 0;
+- } finally {
+- buf.release();
+- }
+- } catch (Http2Exception e) {
+- throw e;
+- } catch (Throwable t) {
+- throw streamError(stream.id(), INTERNAL_ERROR, t,
+- "Decompressor error detected while delegating data read on streamId %d", stream.id());
+- }
++ return decompressor.decompress(ctx, stream, data, padding, endOfStream);
+ }
+
+ @Override
+@@ -218,7 +160,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ }
+ final EmbeddedChannel channel = newContentDecompressor(ctx, contentEncoding);
+ if (channel != null) {
+- decompressor = new Http2Decompressor(channel);
++ decompressor = new Http2Decompressor(channel, connection, listener);
+ stream.setProperty(propertyKey, decompressor);
+ // Decode the content and remove or replace the existing headers
+ // so that the message looks like a decoded message.
+@@ -250,36 +192,6 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ return stream == null ? null : (Http2Decompressor) stream.getProperty(propertyKey);
+ }
+
+- /**
+- * Release remaining content from the {@link EmbeddedChannel}.
+- *
+- * @param decompressor The decompressor for {@code stream}
+- */
+- private static void cleanup(Http2Decompressor decompressor) {
+- decompressor.decompressor().finishAndReleaseAll();
+- }
+-
+- /**
+- * Read the next decompressed {@link ByteBuf} from the {@link EmbeddedChannel}
+- * or {@code null} if one does not exist.
+- *
+- * @param decompressor The channel to read from
+- * @return The next decoded {@link ByteBuf} from the {@link EmbeddedChannel} or {@code null} if one does not exist
+- */
+- private static ByteBuf nextReadableBuf(EmbeddedChannel decompressor) {
+- for (;;) {
+- final ByteBuf buf = decompressor.readInbound();
+- if (buf == null) {
+- return null;
+- }
+- if (!buf.isReadable()) {
+- buf.release();
+- continue;
+- }
+- return buf;
+- }
+- }
+-
+ /**
+ * A decorator around the local flow controller that converts consumed bytes from uncompressed to compressed.
+ */
+@@ -360,24 +272,93 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ */
+ private static final class Http2Decompressor {
+ private final EmbeddedChannel decompressor;
++
+ private int compressed;
+ private int decompressed;
++ private Http2Stream stream;
++ private int padding;
++ private boolean dataDecompressed;
++ private ChannelHandlerContext targetCtx;
+
+- Http2Decompressor(EmbeddedChannel decompressor) {
++ Http2Decompressor(EmbeddedChannel decompressor,
++ final Http2Connection connection, final Http2FrameListener listener) {
+ this.decompressor = decompressor;
++ this.decompressor.pipeline().addLast(new ChannelInboundHandlerAdapter() {
++ @Override
++ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception {
++ ByteBuf buf = (ByteBuf) msg;
++ if (!buf.isReadable()) {
++ buf.release();
++ return;
++ }
++ incrementDecompressedBytes(buf.readableBytes());
++ // Immediately return the bytes back to the flow controller. ConsumedBytesConverter will convert
++ // from the decompressed amount which the user knows about to the compressed amount which flow
++ // control knows about.
++ connection.local().flowController().consumeBytes(stream,
++ listener.onDataRead(targetCtx, stream.id(), buf, padding, false));
++ padding = 0; // Padding is only communicated once on the first iteration.
++ buf.release();
++
++ dataDecompressed = true;
++ }
++
++ @Override
++ public void channelInactive(ChannelHandlerContext ctx) throws Exception {
++ listener.onDataRead(targetCtx, stream.id(), Unpooled.EMPTY_BUFFER, padding, true);
++ }
++ });
+ }
+
+ /**
+- * Responsible for taking compressed bytes in and producing decompressed bytes.
++ * Release remaining content from the {@link EmbeddedChannel}.
+ */
+- EmbeddedChannel decompressor() {
+- return decompressor;
++ void cleanup() {
++ decompressor.finishAndReleaseAll();
+ }
+
++ int decompress(ChannelHandlerContext ctx, Http2Stream stream, ByteBuf data, int padding, boolean endOfStream)
++ throws Http2Exception {
++ final int compressedBytes = data.readableBytes() + padding;
++ incrementCompressedBytes(compressedBytes);
++ try {
++ this.stream = stream;
++ this.padding = padding;
++ this.dataDecompressed = false;
++ this.targetCtx = ctx;
++
++ // call retain here as it will call release after its written to the channel
++ decompressor.writeInbound(data.retain());
++ if (endOfStream) {
++ decompressor.finish();
++
++ if (!dataDecompressed) {
++ // No new decompressed data was extracted from the compressed data. This means the application
++ // could not be provided with data and thus could not return how many bytes were processed.
++ // We will assume there is more data coming which will complete the decompression block.
++ // To allow for more data we return all bytes to the flow control window (so the peer can
++ // send more data).
++ incrementDecompressedBytes(compressedBytes);
++ return compressedBytes;
++ }
++ }
++ // We consume bytes each time we call the listener to ensure if multiple frames are decompressed
++ // that the bytes are accounted for immediately. Otherwise the user may see an inconsistent state of
++ // flow control.
++ return 0;
++ } catch (Throwable t) {
++ // Http2Exception might be thrown by writeInbound(...) or finish().
++ if (t instanceof Http2Exception) {
++ throw (Http2Exception) t;
++ }
++ throw streamError(stream.id(), INTERNAL_ERROR, t,
++ "Decompressor error detected while delegating data read on streamId %d", stream.id());
++ }
++ }
+ /**
+ * Increment the number of bytes received prior to doing any decompression.
+ */
+- void incrementCompressedBytes(int delta) {
++ private void incrementCompressedBytes(int delta) {
+ assert delta >= 0;
+ compressed += delta;
+ }
+@@ -385,7 +366,7 @@ public class DelegatingDecompressorFrameListener extends Http2FrameListenerDecor
+ /**
+ * Increment the number of bytes after the decompression process.
+ */
+- void incrementDecompressedBytes(int delta) {
++ private void incrementDecompressedBytes(int delta) {
+ assert delta >= 0;
+ decompressed += delta;
+ }
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
+index 6c65cd5..9bc684e 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/JZlibDecoder.java
+@@ -28,6 +28,7 @@ public class JZlibDecoder extends ZlibDecoder {
+
+ private final Inflater z = new Inflater();
+ private byte[] dictionary;
++ private boolean needsRead;
+ private volatile boolean finished;
+
+ /**
+@@ -125,6 +126,7 @@ public class JZlibDecoder extends ZlibDecoder {
+
+ @Override
+ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
++ needsRead = true;
+ if (finished) {
+ // Skip data received after finished.
+ in.skipBytes(in.readableBytes());
+@@ -166,6 +168,14 @@ public class JZlibDecoder extends ZlibDecoder {
+ int outputLength = z.next_out_index - oldNextOutIndex;
+ if (outputLength > 0) {
+ decompressed.writerIndex(decompressed.writerIndex() + outputLength);
++ if (maxAllocation == 0) {
++ // If we don't limit the maximum allocations we should just
++ // forward the buffer directly.
++ ByteBuf buffer = decompressed;
++ decompressed = null;
++ needsRead = false;
++ ctx.fireChannelRead(buffer);
++ }
+ }
+
+ switch (resultCode) {
+@@ -196,10 +206,13 @@ public class JZlibDecoder extends ZlibDecoder {
+ }
+ } finally {
+ in.skipBytes(z.next_in_index - oldNextInIndex);
+- if (decompressed.isReadable()) {
+- out.add(decompressed);
+- } else {
+- decompressed.release();
++ if (decompressed != null) {
++ if (decompressed.isReadable()) {
++ needsRead = false;
++ ctx.fireChannelRead(decompressed);
++ } else {
++ decompressed.release();
++ }
+ }
+ }
+ } finally {
+@@ -212,6 +225,17 @@ public class JZlibDecoder extends ZlibDecoder {
+ }
+ }
+
++ @Override
++ public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
++ // Discard bytes of the cumulation buffer if needed.
++ discardSomeReadBytes();
++
++ if (needsRead && !ctx.channel().config().isAutoRead()) {
++ ctx.read();
++ }
++ ctx.fireChannelReadComplete();
++ }
++
+ @Override
+ protected void decompressionBufferExhausted(ByteBuf buffer) {
+ finished = true;
+diff --git a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
+index 7e69422..426b84e 100644
+--- a/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
++++ b/codec/src/main/java/io/netty/handler/codec/compression/JdkZlibDecoder.java
+@@ -57,6 +57,7 @@ public class JdkZlibDecoder extends ZlibDecoder {
+ private GzipState gzipState = GzipState.HEADER_START;
+ private int flags = -1;
+ private int xlen = -1;
++ private boolean needsRead;
+
+ private volatile boolean finished;
+
+@@ -178,6 +179,7 @@ public class JdkZlibDecoder extends ZlibDecoder {
+
+ @Override
+ protected void decode(ChannelHandlerContext ctx, ByteBuf in, List<Object> out) throws Exception {
++ needsRead = true;
+ if (finished) {
+ // Skip data received after finished.
+ in.skipBytes(in.readableBytes());
+@@ -239,14 +241,20 @@ public class JdkZlibDecoder extends ZlibDecoder {
+ if (crc != null) {
+ crc.update(outArray, outIndex, outputLength);
+ }
+- } else {
+- if (inflater.needsDictionary()) {
+- if (dictionary == null) {
+- throw new DecompressionException(
+- "decompression failure, unable to set dictionary as non was specified");
+- }
+- inflater.setDictionary(dictionary);
++ if (maxAllocation == 0) {
++ // If we don't limit the maximum allocations we should just
++ // forward the buffer directly.
++ ByteBuf buffer = decompressed;
++ decompressed = null;
++ needsRead = false;
++ ctx.fireChannelRead(buffer);
++ }
++ } else if (inflater.needsDictionary()) {
++ if (dictionary == null) {
++ throw new DecompressionException(
++ "decompression failure, unable to set dictionary as non was specified");
+ }
++ inflater.setDictionary(dictionary);
+ }
+
+ if (inflater.finished()) {
+@@ -278,11 +286,13 @@ public class JdkZlibDecoder extends ZlibDecoder {
+ } catch (DataFormatException e) {
+ throw new DecompressionException("decompression failure", e);
+ } finally {
+-
+- if (decompressed.isReadable()) {
+- out.add(decompressed);
+- } else {
+- decompressed.release();
++ if (decompressed != null) {
++ if (decompressed.isReadable()) {
++ needsRead = false;
++ ctx.fireChannelRead(decompressed);
++ } else {
++ decompressed.release();
++ }
+ }
+ }
+ }
+@@ -454,4 +464,15 @@ public class JdkZlibDecoder extends ZlibDecoder {
+ return (cmf_flg & 0x7800) == 0x7800 &&
+ cmf_flg % 31 == 0;
+ }
++
++ @Override
++ public void channelReadComplete(ChannelHandlerContext ctx) throws Exception {
++ // Discard bytes of the cumulation buffer if needed.
++ discardSomeReadBytes();
++
++ if (needsRead && !ctx.channel().config().isAutoRead()) {
++ ctx.read();
++ }
++ ctx.fireChannelReadComplete();
++ }
+ }
+diff --git a/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java b/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java
+index 5eaed2f..dc05eb6 100644
+--- a/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java
++++ b/codec/src/test/java/io/netty/handler/codec/compression/AbstractIntegrationTest.java
+@@ -17,7 +17,10 @@
+
+ import io.netty.buffer.ByteBuf;
+ import io.netty.buffer.CompositeByteBuf;
++import io.netty.buffer.PooledByteBufAllocator;
+ import io.netty.buffer.Unpooled;
++import io.netty.channel.ChannelHandlerContext;
++import io.netty.channel.ChannelInboundHandlerAdapter;
+ import io.netty.channel.embedded.EmbeddedChannel;
+ import io.netty.util.CharsetUtil;
+ import io.netty.util.ReferenceCountUtil;
+@@ -166,4 +169,63 @@
+ decompressed.release();
+ in.release();
+ }
++
++ @Test
++ public void testHugeDecompress() {
++ int chunkSize = 1024 * 1024;
++ int numberOfChunks = 256;
++ int memoryLimit = chunkSize * 128;
++
++ EmbeddedChannel compressChannel = createEncoder();
++ ByteBuf compressed = compressChannel.alloc().buffer();
++ for (int i = 0; i <= numberOfChunks; i++) {
++ if (i < numberOfChunks) {
++ ByteBuf in = compressChannel.alloc().buffer(chunkSize);
++ in.writeZero(chunkSize);
++ compressChannel.writeOutbound(in);
++ } else {
++ compressChannel.close();
++ }
++ while (true) {
++ ByteBuf buf = compressChannel.readOutbound();
++ if (buf == null) {
++ break;
++ }
++ compressed.writeBytes(buf);
++ buf.release();
++ }
++ }
++
++ PooledByteBufAllocator allocator = new PooledByteBufAllocator(false);
++
++ HugeDecompressIncomingHandler endHandler = new HugeDecompressIncomingHandler(memoryLimit);
++ EmbeddedChannel decompressChannel = createDecoder();
++ decompressChannel.pipeline().addLast(endHandler);
++ decompressChannel.config().setAllocator(allocator);
++ decompressChannel.writeInbound(compressed);
++ decompressChannel.finishAndReleaseAll();
++ assertEquals((long) chunkSize * numberOfChunks, endHandler.total);
++ }
++
++ private static final class HugeDecompressIncomingHandler extends ChannelInboundHandlerAdapter {
++ final int memoryLimit;
++ long total;
++
++ HugeDecompressIncomingHandler(int memoryLimit) {
++ this.memoryLimit = memoryLimit;
++ }
++
++ @Override
++ public void channelRead(ChannelHandlerContext ctx, Object msg) {
++ ByteBuf buf = (ByteBuf) msg;
++ total += buf.readableBytes();
++ try {
++ PooledByteBufAllocator allocator = (PooledByteBufAllocator) ctx.alloc();
++ assertThat(allocator.metric().usedHeapMemory(), lessThan((long) memoryLimit));
++ assertThat(allocator.metric().usedDirectMemory(), lessThan((long) memoryLimit));
++ } finally {
++ buf.release();
++ }
++ }
++ }
+ }
=====================================
debian/patches/CVE-2025-59419 → debian/patches/CVE-2025-59419.patch
=====================================
@@ -32,11 +32,9 @@ bug-debian: https://bugs.debian.org/cgi-bin/bugreport.cgi?bug=1118282
3 files changed, 119 insertions(+)
create mode 100644 codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java
-diff --git a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java
-index 8f4d697..ae6acb4 100644
--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java
+++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/DefaultSmtpRequest.java
-@@ -43,6 +43,7 @@ public final class DefaultSmtpRequest implements SmtpRequest {
+@@ -43,6 +43,7 @@
*/
public DefaultSmtpRequest(SmtpCommand command, CharSequence... parameters) {
this.command = ObjectUtil.checkNotNull(command, "command");
@@ -44,7 +42,7 @@ index 8f4d697..ae6acb4 100644
this.parameters = SmtpUtils.toUnmodifiableList(parameters);
}
-@@ -55,6 +56,7 @@ public final class DefaultSmtpRequest implements SmtpRequest {
+@@ -55,6 +56,7 @@
DefaultSmtpRequest(SmtpCommand command, List<CharSequence> parameters) {
this.command = ObjectUtil.checkNotNull(command, "command");
@@ -52,11 +50,9 @@ index 8f4d697..ae6acb4 100644
this.parameters = parameters != null ?
Collections.unmodifiableList(parameters) : Collections.<CharSequence>emptyList();
}
-diff --git a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java
-index a2b84ea..6b84dc1 100644
--- a/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java
+++ b/codec-smtp/src/main/java/io/netty/handler/codec/smtp/SmtpUtils.java
-@@ -28,5 +28,49 @@ final class SmtpUtils {
+@@ -28,5 +28,49 @@
return Collections.unmodifiableList(Arrays.asList(sequences));
}
@@ -106,9 +102,6 @@ index a2b84ea..6b84dc1 100644
+
private SmtpUtils() { }
}
-diff --git a/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java
-new file mode 100644
-index 0000000..f7b5b6a
--- /dev/null
+++ b/codec-smtp/src/test/java/io/netty/handler/codec/smtp/SmtpRequestsTest.java
@@ -0,0 +1,73 @@
=====================================
debian/patches/series
=====================================
@@ -23,8 +23,10 @@ CVE-2022-41881.patch
CVE-2022-41915.patch
CVE-2023-34462.patch
CVE-2023-44487.patch
-CVE-2025-59419
+CVE-2024-29025.patch
+CVE-2025-59419.patch
CVE-2025-55163_before-1.patch
CVE-2025-55163_1.patch
CVE-2025-55163_2.patch
+CVE-2025-58057.patch
CVE-2025-58056.patch
View it on GitLab: https://salsa.debian.org/java-team/netty/-/compare/3571b5bdbaf4b9df12ecb6a95482d69c627b5864...5670a38970b5553c8af1c8f22c23a115a62c4d27
--
View it on GitLab: https://salsa.debian.org/java-team/netty/-/compare/3571b5bdbaf4b9df12ecb6a95482d69c627b5864...5670a38970b5553c8af1c8f22c23a115a62c4d27
You're receiving this email because of your account on salsa.debian.org.
-------------- next part --------------
An HTML attachment was scrubbed...
URL: <http://alioth-lists.debian.net/pipermail/pkg-java-commits/attachments/20251129/5dac4ea0/attachment.htm>
More information about the pkg-java-commits
mailing list