[Pkg-rust-maintainers] Bug#988945: Bug#988945: rust-http: diff for NMU version 0.1.21-0.1
Sylvestre Ledru
sylvestre at debian.org
Sun Apr 10 21:15:47 BST 2022
Hello
Maybe you should join the team, commit there, and follow the process for our packages? :)
You won't need to do an NMU.
Cheers,
Sylvestre
On 10/04/2022 at 21:42, Jonas Smedegaard wrote:
> Control: tags 988945 + patch
> Control: tags 988945 + pending
>
> Dear maintainer,
>
> I've prepared an NMU for rust-http (versioned as 0.1.21-0.1) and
> uploaded it without delay, due to the current package being completely broken.
>
>
> Regards,
>
> - Jonas
>
> diff -Nru rust-http-0.1.19/Cargo.toml rust-http-0.1.21/Cargo.toml
> --- rust-http-0.1.19/Cargo.toml 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/Cargo.toml 2019-12-02 20:18:55.000000000 +0100
> @@ -1,26 +1,38 @@
> -# THIS FILE IS AUTOMATICALLY GENERATED BY CARGO
> -#
> -# When uploading crates to the registry Cargo will automatically
> -# "normalize" Cargo.toml files for maximal compatibility
> -# with all versions of Cargo and also rewrite `path` dependencies
> -# to registry (e.g., crates.io) dependencies
> -#
> -# If you believe there's an error in this file please file an
> -# issue against the rust-lang/cargo repository. If you're
> -# editing this file be aware that the upstream Cargo.toml
> -# will likely look very different (and much more reasonable)
> -
> [package]
> name = "http"
> -version = "0.1.19"
> -authors = ["Alex Crichton <alex at alexcrichton.com>", "Carl Lerche <me at carllerche.com>", "Sean McArthur <sean at seanmonstar.com>"]
> -description = "A set of types for representing HTTP requests and responses.\n"
> -documentation = "https://docs.rs/http"
> +# When releasing to crates.io:
> +# - Update html_root_url in lib.rs.
> +# - Update CHANGELOG.md.
> +# - Create git tag
> +version = "0.1.21"
> readme = "README.md"
> +documentation = "https://docs.rs/http"
> +repository = "https://github.com/hyperium/http"
> +license = "MIT/Apache-2.0"
> +authors = [
> + "Alex Crichton <alex at alexcrichton.com>",
> + "Carl Lerche <me at carllerche.com>",
> + "Sean McArthur <sean at seanmonstar.com>",
> +]
> +description = """
> +A set of types for representing HTTP requests and responses.
> +"""
> keywords = ["http"]
> categories = ["web-programming"]
> -license = "MIT/Apache-2.0"
> -repository = "https://github.com/hyperium/http"
> +
> +[dependencies]
> +bytes = "0.4"
> +fnv = "1.0.5"
> +itoa = "0.4.1"
> +
> +[dev-dependencies]
> +indexmap = "1.0"
> +quickcheck = "0.6"
> +rand = "0.4"
> +seahash = "3.0.5"
> +serde = "1.0"
> +serde_json = "1.0"
> +doc-comment = "0.3"
>
> [[bench]]
> name = "header_map"
> @@ -37,31 +49,3 @@
> [[bench]]
> name = "uri"
> path = "benches/uri.rs"
> -[dependencies.bytes]
> -version = "0.4"
> -
> -[dependencies.fnv]
> -version = "1.0.5"
> -
> -[dependencies.itoa]
> -version = "0.4.1"
> -[dev-dependencies.doc-comment]
> -version = "0.3"
> -
> -[dev-dependencies.indexmap]
> -version = "1.0"
> -
> -[dev-dependencies.quickcheck]
> -version = "0.6"
> -
> -[dev-dependencies.rand]
> -version = "0.4"
> -
> -[dev-dependencies.seahash]
> -version = "3.0.5"
> -
> -[dev-dependencies.serde]
> -version = "1.0"
> -
> -[dev-dependencies.serde_json]
> -version = "1.0"
> diff -Nru rust-http-0.1.19/Cargo.toml.orig rust-http-0.1.21/Cargo.toml.orig
> --- rust-http-0.1.19/Cargo.toml.orig 2019-10-15 20:44:13.000000000 +0200
> +++ rust-http-0.1.21/Cargo.toml.orig 1970-01-01 01:00:00.000000000 +0100
> @@ -1,51 +0,0 @@
> -[package]
> -name = "http"
> -# When releasing to crates.io:
> -# - Update html_root_url in lib.rs.
> -# - Update CHANGELOG.md.
> -# - Create git tag
> -version = "0.1.19"
> -readme = "README.md"
> -documentation = "https://docs.rs/http"
> -repository = "https://github.com/hyperium/http"
> -license = "MIT/Apache-2.0"
> -authors = [
> - "Alex Crichton <alex at alexcrichton.com>",
> - "Carl Lerche <me at carllerche.com>",
> - "Sean McArthur <sean at seanmonstar.com>",
> -]
> -description = """
> -A set of types for representing HTTP requests and responses.
> -"""
> -keywords = ["http"]
> -categories = ["web-programming"]
> -
> -[dependencies]
> -bytes = "0.4"
> -fnv = "1.0.5"
> -itoa = "0.4.1"
> -
> -[dev-dependencies]
> -indexmap = "1.0"
> -quickcheck = "0.6"
> -rand = "0.4"
> -seahash = "3.0.5"
> -serde = "1.0"
> -serde_json = "1.0"
> -doc-comment = "0.3"
> -
> -[[bench]]
> -name = "header_map"
> -path = "benches/header_map/mod.rs"
> -
> -[[bench]]
> -name = "header_name"
> -path = "benches/header_name.rs"
> -
> -[[bench]]
> -name = "header_value"
> -path = "benches/header_value.rs"
> -
> -[[bench]]
> -name = "uri"
> -path = "benches/uri.rs"
> diff -Nru rust-http-0.1.19/.cargo_vcs_info.json rust-http-0.1.21/.cargo_vcs_info.json
> --- rust-http-0.1.19/.cargo_vcs_info.json 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/.cargo_vcs_info.json 1970-01-01 01:00:00.000000000 +0100
> @@ -1,5 +0,0 @@
> -{
> - "git": {
> - "sha1": "9c05e391e00474abaa8c14a86bcb0fc5eff1120e"
> - }
> -}
> diff -Nru rust-http-0.1.19/CHANGELOG.md rust-http-0.1.21/CHANGELOG.md
> --- rust-http-0.1.19/CHANGELOG.md 2019-10-15 20:45:44.000000000 +0200
> +++ rust-http-0.1.21/CHANGELOG.md 2019-12-02 20:18:55.000000000 +0100
> @@ -1,3 +1,14 @@
> +# 0.1.21 (December 2, 2019)
> +
> +* Fix `Method::is_idempotent` returning `false` for `PUT` and `DELETE.
> +
> +# 0.1.20 (November 26, 2019)
> +
> +* Fix possible double-free if `header::Drain` iterator is `std::mem::forgot`en (#357).
> +* Fix possible data race if multiple `header::ValueDrain`s are iterated on different threads (#362).
> +* Fix `HeaderMap::reserve` capacity overflows (#360).
> +* Fix parsing long authority-form `Uri`s (#351).
> +
> # 0.1.19 (October 15, 2019)
>
> * Allow `%` in IPv6 addresses in `Uri` (#343).
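
For context, the user-visible part of the two upstream point releases above is easy to spot-check; the following is only an illustrative sketch against the http 0.1.x Method API, mirroring the test_is_idempotent test added in src/method.rs further down in this diff:

use http::Method;

fn main() {
    // Per the 0.1.21 entry above: PUT and DELETE must report as idempotent
    // (RFC 7231, section 4.2.2), alongside the safe methods.
    assert!(Method::PUT.is_idempotent());
    assert!(Method::DELETE.is_idempotent());
    assert!(Method::GET.is_idempotent());
    // POST is neither safe nor idempotent.
    assert!(!Method::POST.is_idempotent());
}
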
> diff -Nru rust-http-0.1.19/debian/changelog rust-http-0.1.21/debian/changelog
> --- rust-http-0.1.19/debian/changelog 2021-03-08 07:19:34.000000000 +0100
> +++ rust-http-0.1.21/debian/changelog 2022-04-10 21:36:10.000000000 +0200
> @@ -1,3 +1,28 @@
> +rust-http (0.1.21-0.1) unstable; urgency=medium
> +
> + * non-maintainer upload
> + * upgrade to new upstream release 0.2.21;
> + closes: bug#988945, thanks to Moritz Muehlenhoff;
> + CVE-2019-25009
> + * drop patch cherry-picked upstream now applied
> + * fix unsatisfiable dependencies and failure to build from source:
> + + add patches to use newer release of crate bytes
> + + build-depend and autopkgtest-depend
> + on librust-block-bytes-1+default-dev
> + (not older version gone since 2021-11-28)
> + + add patch to avoid crates quickcheck rand:
> + same-API quickcheck not in Debian;
> + see <https://github.com/BurntSushi/quickcheck/pull/271#issue-784946462=>
> + + add patch to avoid crate seahash:
> + used only for benchmark
> + + drop autopkgtest dependencies on
> + librust-quickcheck-1+default-dev
> + librust-rand-0.8+default-dev
> + librust-seahash-4+default-dev
> + (some of which were never in Debian)
> +
> + -- Jonas Smedegaard <dr at jones.dk> Sun, 10 Apr 2022 21:36:10 +0200
> +
> rust-http (0.1.19-2) unstable; urgency=medium
>
> * Package http 0.1.19 from crates.io using debcargo 2.4.3
> diff -Nru rust-http-0.1.19/debian/control rust-http-0.1.21/debian/control
> --- rust-http-0.1.19/debian/control 2021-03-08 07:19:34.000000000 +0100
> +++ rust-http-0.1.21/debian/control 2022-04-10 21:36:07.000000000 +0200
> @@ -6,7 +6,7 @@
> cargo:native <!nocheck>,
> rustc:native <!nocheck>,
> libstd-rust-dev <!nocheck>,
> - librust-bytes-0.4+default-dev <!nocheck>,
> + librust-bytes-1+default-dev <!nocheck>,
> librust-fnv-1+default-dev (>= 1.0.5-~~) <!nocheck>,
> librust-itoa-0.4+default-dev (>= 0.4.1-~~) <!nocheck>
> Maintainer: Debian Rust Maintainers <pkg-rust-maintainers at alioth-lists.debian.net>
> @@ -22,7 +22,7 @@
> Multi-Arch: same
> Depends:
> ${misc:Depends},
> - librust-bytes-0.4+default-dev,
> + librust-bytes-1+default-dev,
> librust-fnv-1+default-dev (>= 1.0.5-~~),
> librust-itoa-0.4+default-dev (>= 0.4.1-~~)
> Provides:
> @@ -31,8 +31,8 @@
> librust-http-0+default-dev (= ${binary:Version}),
> librust-http-0.1-dev (= ${binary:Version}),
> librust-http-0.1+default-dev (= ${binary:Version}),
> - librust-http-0.1.19-dev (= ${binary:Version}),
> - librust-http-0.1.19+default-dev (= ${binary:Version})
> + librust-http-0.1.21-dev (= ${binary:Version}),
> + librust-http-0.1.21+default-dev (= ${binary:Version})
> Description: Set of types for representing HTTP requests and responses - Rust source code
> This package contains the source for the Rust http crate, packaged by debcargo
> for use with cargo and dh-cargo.
> diff -Nru rust-http-0.1.19/debian/patches/avoid_quickcheck.patch rust-http-0.1.21/debian/patches/avoid_quickcheck.patch
> --- rust-http-0.1.19/debian/patches/avoid_quickcheck.patch 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/avoid_quickcheck.patch 2022-04-10 21:28:44.000000000 +0200
> @@ -0,0 +1,31 @@
> +Description: avoid crate quickcheck
> + Uses old release of quickcheck unavailable in Debian.
> +Author: Jonas Smedegaard <dr at jones.dk>
> +Last-Update: 2022-04-10
> +---
> +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
> +--- a/Cargo.toml
> ++++ b/Cargo.toml
> +@@ -27,8 +27,8 @@
> +
> + [dev-dependencies]
> + indexmap = "1.0"
> +-quickcheck = "0.6"
> +-rand = "0.4"
> ++#quickcheck = "0.6"
> ++#rand = "0.4"
> + seahash = "3.0.5"
> + serde = "1.0"
> + serde_json = "1.0"
> +--- a/tests/header_map_fuzz.rs
> ++++ b/tests/header_map_fuzz.rs
> +@@ -1,3 +1,4 @@
> ++/*
> + extern crate http;
> + extern crate rand;
> + extern crate quickcheck;
> +@@ -363,3 +364,4 @@
> +
> + String::from_utf8(bytes).unwrap()
> + }
> ++*/
> diff -Nru rust-http-0.1.19/debian/patches/avoid_seahash.patch rust-http-0.1.21/debian/patches/avoid_seahash.patch
> --- rust-http-0.1.19/debian/patches/avoid_seahash.patch 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/avoid_seahash.patch 2022-04-10 21:27:52.000000000 +0200
> @@ -0,0 +1,49 @@
> +Description: avoid crate seahash
> + Crate seahash is used only in a benchmark
> + that cannot build with stable rust.
> + .
> + While at it, disable all benchmarks,
> + as none of them can buil with stable rust.
> +Author: Jonas Smedegaard <dr at jones.dk>
> +Last-Update: 2022-04-10
> +---
> +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
> +--- a/Cargo.toml
> ++++ b/Cargo.toml
> +@@ -29,23 +29,23 @@
> + indexmap = "1.0"
> + #quickcheck = "0.6"
> + #rand = "0.4"
> +-seahash = "3.0.5"
> ++#seahash = "3.0.5"
> + serde = "1.0"
> + serde_json = "1.0"
> + doc-comment = "0.3"
> +
> +-[[bench]]
> +-name = "header_map"
> +-path = "benches/header_map/mod.rs"
> ++#[[bench]]
> ++#name = "header_map"
> ++#path = "benches/header_map/mod.rs"
> +
> +-[[bench]]
> +-name = "header_name"
> +-path = "benches/header_name.rs"
> ++#[[bench]]
> ++#name = "header_name"
> ++#path = "benches/header_name.rs"
> +
> +-[[bench]]
> +-name = "header_value"
> +-path = "benches/header_value.rs"
> ++#[[bench]]
> ++#name = "header_value"
> ++#path = "benches/header_value.rs"
> +
> +-[[bench]]
> +-name = "uri"
> +-path = "benches/uri.rs"
> ++#[[bench]]
> ++#name = "uri"
> ++#path = "benches/uri.rs"
> diff -Nru rust-http-0.1.19/debian/patches/fix-capacity-overflows-in-headermap-reserve.patch rust-http-0.1.21/debian/patches/fix-capacity-overflows-in-headermap-reserve.patch
> --- rust-http-0.1.19/debian/patches/fix-capacity-overflows-in-headermap-reserve.patch 2021-03-08 07:19:34.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/fix-capacity-overflows-in-headermap-reserve.patch 1970-01-01 01:00:00.000000000 +0100
> @@ -1,53 +0,0 @@
> -From 81ceb611cf96abe91d91693e813cd5ee36cdae02 Mon Sep 17 00:00:00 2001
> -From: Sean McArthur <sean at seanmonstar.com>
> -Date: Mon, 25 Nov 2019 15:54:04 -0800
> -Subject: Fix capacity overflows in HeaderMap::reserve
> - The patch required minimal adaption from upstream because the surrounding
> - code had changed in upstream `master` branch over the 0.1.19 release.
> - .
> - Contrary to what one might assume with knowledge of `assert()` in C, the
> - rust `assert!()` macro never gets removed for optimization, and is always
> - checked resulting in a `panic!()` and thus a controlled shutdown of the
> - process as described in
> - https://doc.rust-lang.org/std/macro.assert.html#uses.
> -Origin: upstream, https://github.com/hyperium/http/commit/81ceb611cf96abe91d91693e813cd5ee36cdae02
> -Bug: https://github.com/hyperium/http/issues/352
> -Bug-Debian: https://bugs.debian.org/969896
> -
> ---- a/src/header/map.rs
> -+++ b/src/header/map.rs
> -@@ -628,6 +628,9 @@
> -
> - if cap > self.indices.len() {
> - let cap = cap.next_power_of_two();
> -+ assert!(cap < MAX_SIZE, "header map reserve over max capacity");
> -+ assert!(cap != 0, "header map reserve overflowed");
> -+
> -
> - if self.entries.len() == 0 {
> - self.mask = cap - 1;
> ---- a/tests/header_map.rs
> -+++ b/tests/header_map.rs
> -@@ -38,6 +38,22 @@
> - }
> -
> - #[test]
> -+#[should_panic]
> -+fn reserve_over_capacity() {
> -+ // See https://github.com/hyperium/http/issues/352
> -+ let mut headers = HeaderMap::<u32>::with_capacity(32);
> -+ headers.reserve(50_000); // over MAX_SIZE
> -+}
> -+
> -+#[test]
> -+#[should_panic]
> -+fn reserve_overflow() {
> -+ // See https://github.com/hyperium/http/issues/352
> -+ let mut headers = HeaderMap::<u32>::with_capacity(0);
> -+ headers.reserve(std::usize::MAX); // next_power_of_two overflows
> -+}
> -+
> -+#[test]
> - fn drain() {
> - let mut headers = HeaderMap::new();
> -
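
The patch removed above was a cherry-pick of the HeaderMap::reserve fix that upstream shipped in 0.1.20, so the same change reappears verbatim in the src/header/map.rs and tests/header_map.rs hunks below. The reason the two assert!() calls are needed at all is that usize::next_power_of_two wraps to 0 on overflow in release builds instead of panicking; a standalone sketch of that behaviour (not crate code, the literal 50_000 is only illustrative):

fn main() {
    // checked_next_power_of_two makes the overflow explicit; the plain
    // next_power_of_two would silently wrap to 0 in release builds and
    // produce a nonsensical mask unless reserve() asserts first.
    assert_eq!(usize::MAX.checked_next_power_of_two(), None);
    assert_eq!(50_000usize.checked_next_power_of_two(), Some(65_536));
}
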
> diff -Nru rust-http-0.1.19/debian/patches/series rust-http-0.1.21/debian/patches/series
> --- rust-http-0.1.19/debian/patches/series 2021-03-08 07:19:34.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/series 2022-04-10 21:08:42.000000000 +0200
> @@ -1 +1,4 @@
> -fix-capacity-overflows-in-headermap-reserve.patch
> +upgrade-to-bytes-0.5.patch
> +upgrade-to-bytes-1.patch
> +avoid_quickcheck.patch
> +avoid_seahash.patch
> diff -Nru rust-http-0.1.19/debian/patches/upgrade-to-bytes-0.5.patch rust-http-0.1.21/debian/patches/upgrade-to-bytes-0.5.patch
> --- rust-http-0.1.19/debian/patches/upgrade-to-bytes-0.5.patch 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/upgrade-to-bytes-0.5.patch 2022-04-10 19:40:20.000000000 +0200
> @@ -0,0 +1,207 @@
> +Description: Upgrade to bytes 0.5
> +Origin: upstream, https://github.com/hyperium/http/commit/43dffa1
> +Author: Sean McArthur <sean at seanmonstar.com>
> +Last-Update: 2022-03-29
> +---
> +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
> +--- a/Cargo.toml
> ++++ b/Cargo.toml
> +@@ -21,7 +21,7 @@
> + categories = ["web-programming"]
> +
> + [dependencies]
> +-bytes = "0.4"
> ++bytes = "0.5"
> + fnv = "1.0.5"
> + itoa = "0.4.1"
> +
> +--- a/src/byte_str.rs
> ++++ b/src/byte_str.rs
> +@@ -50,7 +50,7 @@
> + impl<'a> From<&'a str> for ByteStr {
> + #[inline]
> + fn from(src: &'a str) -> ByteStr {
> +- ByteStr { bytes: Bytes::from(src) }
> ++ ByteStr { bytes: Bytes::copy_from_slice(src.as_bytes()) }
> + }
> + }
> +
> +--- a/src/header/name.rs
> ++++ b/src/header/name.rs
> +@@ -1662,7 +1662,7 @@
> + match parse_hdr(src, &mut buf, &HEADER_CHARS)?.inner {
> + Repr::Standard(std) => Ok(std.into()),
> + Repr::Custom(MaybeLower { buf, lower: true }) => {
> +- let buf = Bytes::from(buf);
> ++ let buf = Bytes::copy_from_slice(buf);
> + let val = unsafe { ByteStr::from_utf8_unchecked(buf) };
> + Ok(Custom(val).into())
> + }
> +@@ -1677,7 +1677,7 @@
> + return Err(InvalidHeaderName::new());
> + }
> +
> +- dst.put(b);
> ++ dst.put_u8(b);
> + }
> +
> + let val = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) };
> +@@ -1711,7 +1711,7 @@
> + match parse_hdr(src, &mut buf, &HEADER_CHARS_H2)?.inner {
> + Repr::Standard(std) => Ok(std.into()),
> + Repr::Custom(MaybeLower { buf, lower: true }) => {
> +- let buf = Bytes::from(buf);
> ++ let buf = Bytes::copy_from_slice(buf);
> + let val = unsafe { ByteStr::from_utf8_unchecked(buf) };
> + Ok(Custom(val).into())
> + }
> +@@ -1722,7 +1722,7 @@
> + }
> + }
> +
> +- let buf = Bytes::from(buf);
> ++ let buf = Bytes::copy_from_slice(buf);
> + let val = unsafe { ByteStr::from_utf8_unchecked(buf) };
> + Ok(Custom(val).into())
> + }
> +@@ -2089,7 +2089,7 @@
> + }
> + Repr::Custom(maybe_lower) => {
> + if maybe_lower.lower {
> +- let buf = Bytes::from(&maybe_lower.buf[..]);
> ++ let buf = Bytes::copy_from_slice(&maybe_lower.buf[..]);
> + let byte_str = unsafe { ByteStr::from_utf8_unchecked(buf) };
> +
> + HeaderName {
> +@@ -2100,7 +2100,7 @@
> + let mut dst = BytesMut::with_capacity(maybe_lower.buf.len());
> +
> + for b in maybe_lower.buf.iter() {
> +- dst.put(HEADER_CHARS[*b as usize]);
> ++ dst.put_u8(HEADER_CHARS[*b as usize]);
> + }
> +
> + let buf = unsafe { ByteStr::from_utf8_unchecked(dst.freeze()) };
> +--- a/src/header/value.rs
> ++++ b/src/header/value.rs
> +@@ -104,7 +104,7 @@
> + /// ```
> + #[inline]
> + pub fn from_str(src: &str) -> Result<HeaderValue, InvalidHeaderValue> {
> +- HeaderValue::try_from(src)
> ++ HeaderValue::try_from_generic(src, |s| Bytes::copy_from_slice(s.as_bytes()))
> + }
> +
> + /// Converts a HeaderName into a HeaderValue
> +@@ -150,7 +150,7 @@
> + /// ```
> + #[inline]
> + pub fn from_bytes(src: &[u8]) -> Result<HeaderValue, InvalidHeaderValue> {
> +- HeaderValue::try_from(src)
> ++ HeaderValue::try_from_generic(src, Bytes::copy_from_slice)
> + }
> +
> + /// Attempt to convert a `Bytes` buffer to a `HeaderValue`.
> +@@ -163,7 +163,7 @@
> + /// implementation once the trait is stabilized in std.
> + #[inline]
> + pub fn from_shared(src: Bytes) -> Result<HeaderValue, InvalidHeaderValueBytes> {
> +- HeaderValue::try_from(src).map_err(InvalidHeaderValueBytes)
> ++ HeaderValue::try_from_generic(src, std::convert::identity).map_err(InvalidHeaderValueBytes)
> + }
> +
> + /// Convert a `Bytes` directly into a `HeaderValue` without validating.
> +@@ -189,7 +189,7 @@
> + }
> + }
> +
> +- fn try_from<T: AsRef<[u8]> + Into<Bytes>>(src: T) -> Result<HeaderValue, InvalidHeaderValue> {
> ++ fn try_from_generic<T: AsRef<[u8]>, F: FnOnce(T) -> Bytes>(src: T, into: F) -> Result<HeaderValue, InvalidHeaderValue> {
> + for &b in src.as_ref() {
> + if !is_valid(b) {
> + return Err(InvalidHeaderValue {
> +@@ -198,7 +198,7 @@
> + }
> + }
> + Ok(HeaderValue {
> +- inner: src.into(),
> ++ inner: into(src),
> + is_sensitive: false,
> + })
> + }
> +@@ -546,6 +546,15 @@
> + }
> + }
> +
> ++impl HttpTryFrom<Vec<u8>> for HeaderValue {
> ++ type Error = InvalidHeaderValueBytes;
> ++
> ++ #[inline]
> ++ fn try_from(vec: Vec<u8>) -> Result<Self, Self::Error> {
> ++ HeaderValue::from_shared(vec.into())
> ++ }
> ++}
> ++
> + impl HttpTryFrom<Bytes> for HeaderValue {
> + type Error = InvalidHeaderValueBytes;
> +
> +--- a/src/uri/authority.rs
> ++++ b/src/uri/authority.rs
> +@@ -1,6 +1,3 @@
> +-// Deprecated in 1.26, needed until our minimum version is >=1.23.
> +-#[allow(unused, deprecated)]
> +-use std::ascii::AsciiExt;
> + use std::{cmp, fmt, str};
> + use std::hash::{Hash, Hasher};
> + use std::str::FromStr;
> +@@ -457,7 +454,9 @@
> + }
> +
> + Ok(Authority {
> +- data: unsafe { ByteStr::from_utf8_unchecked(s.into()) },
> ++ data: unsafe {
> ++ ByteStr::from_utf8_unchecked(Bytes::copy_from_slice(s))
> ++ },
> + })
> + }
> + }
> +--- a/src/uri/mod.rs
> ++++ b/src/uri/mod.rs
> +@@ -888,7 +888,7 @@
> +
> + #[inline]
> + fn from_str(s: &str) -> Result<Uri, InvalidUri> {
> +- Uri::from_shared(s.into()).map_err(|e| e.0)
> ++ Uri::from_shared(Bytes::copy_from_slice(s.as_bytes())).map_err(|e| e.0)
> + }
> + }
> +
> +--- a/src/uri/path.rs
> ++++ b/src/uri/path.rs
> +@@ -296,7 +296,7 @@
> + type Error = InvalidUri;
> + #[inline]
> + fn try_from(s: &'a [u8]) -> Result<Self, Self::Error> {
> +- PathAndQuery::from_shared(s.into()).map_err(|e| e.0)
> ++ PathAndQuery::from_shared(Bytes::copy_from_slice(s)).map_err(|e| e.0)
> + }
> + }
> +
> +--- a/src/uri/scheme.rs
> ++++ b/src/uri/scheme.rs
> +@@ -1,6 +1,3 @@
> +-// Deprecated in 1.26, needed until our minimum version is >=1.23.
> +-#[allow(unused, deprecated)]
> +-use std::ascii::AsciiExt;
> + use std::fmt;
> + use std::hash::{Hash, Hasher};
> + use std::str::FromStr;
> +@@ -130,7 +127,7 @@
> + Other(_) => {
> + // Unsafe: parse_exact already checks for a strict subset of UTF-8
> + Ok(Other(Box::new(unsafe {
> +- ByteStr::from_utf8_unchecked(s.into())
> ++ ByteStr::from_utf8_unchecked(Bytes::copy_from_slice(s))
> + })).into())
> + }
> + }
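
Two substitutions recur throughout this patch: Bytes::from(...) becomes Bytes::copy_from_slice(...) for borrowed data, and BufMut::put(b) becomes put_u8(b) for single bytes. A minimal sketch of the replacement calls, assuming bytes 1.x as now pulled in by the updated packaging (the literals are only illustrative):

use bytes::{BufMut, Bytes, BytesMut};

fn main() {
    // Borrowed (non-'static) data has to be copied explicitly in bytes >= 0.5.
    let s = "hello";
    let b = Bytes::copy_from_slice(s.as_bytes());
    assert_eq!(&b[..], b"hello");

    // put() no longer accepts a bare u8; put_u8() is the one-byte form.
    let mut dst = BytesMut::with_capacity(1);
    dst.put_u8(b'h');
    assert_eq!(&dst[..], b"h");
}
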
> diff -Nru rust-http-0.1.19/debian/patches/upgrade-to-bytes-1.patch rust-http-0.1.21/debian/patches/upgrade-to-bytes-1.patch
> --- rust-http-0.1.19/debian/patches/upgrade-to-bytes-1.patch 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/debian/patches/upgrade-to-bytes-1.patch 2022-04-10 19:41:00.000000000 +0200
> @@ -0,0 +1,17 @@
> +Description: Upgrade to Bytes 1.0
> +Origin: upstream, https://github.com/hyperium/http/commit/95c338e
> +Author: Sean McArthur <sean at seanmonstar.com>
> +Last-Update: 2022-03-29
> +---
> +This patch header follows DEP-3: http://dep.debian.net/deps/dep3/
> +--- a/Cargo.toml
> ++++ b/Cargo.toml
> +@@ -21,7 +21,7 @@
> + categories = ["web-programming"]
> +
> + [dependencies]
> +-bytes = "0.5"
> ++bytes = "1"
> + fnv = "1.0.5"
> + itoa = "0.4.1"
> +
> diff -Nru rust-http-0.1.19/debian/tests/control rust-http-0.1.21/debian/tests/control
> --- rust-http-0.1.19/debian/tests/control 2021-03-08 07:19:34.000000000 +0100
> +++ rust-http-0.1.21/debian/tests/control 2022-04-10 21:08:24.000000000 +0200
> @@ -1,14 +1,14 @@
> -Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.19 --all-targets --all-features
> +Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.21 --all-targets --all-features
> Features: test-name=rust-http:@
> -Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-quickcheck-0.6+default-dev, librust-rand-0.4+default-dev, librust-seahash-3+default-dev (>= 3.0.5-~~), librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> +Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> Restrictions: allow-stderr, skip-not-installable
>
> -Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.19 --all-targets
> +Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.21 --all-targets
> Features: test-name=librust-http-dev:default
> -Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-quickcheck-0.6+default-dev, librust-rand-0.4+default-dev, librust-seahash-3+default-dev (>= 3.0.5-~~), librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> +Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> Restrictions: allow-stderr, skip-not-installable
>
> -Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.19 --all-targets --no-default-features
> +Test-Command: /usr/share/cargo/bin/cargo-auto-test http 0.1.21 --all-targets --no-default-features
> Features: test-name=librust-http-dev:
> -Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-quickcheck-0.6+default-dev, librust-rand-0.4+default-dev, librust-seahash-3+default-dev (>= 3.0.5-~~), librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> +Depends: dh-cargo (>= 18), librust-doc-comment-0.3+default-dev, librust-indexmap-1+default-dev, librust-serde-1+default-dev, librust-serde-json-1+default-dev, @
> Restrictions: allow-stderr, skip-not-installable
> diff -Nru rust-http-0.1.19/src/header/map.rs rust-http-0.1.21/src/header/map.rs
> --- rust-http-0.1.19/src/header/map.rs 2019-07-26 19:21:06.000000000 +0200
> +++ rust-http-0.1.21/src/header/map.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -135,7 +135,9 @@
> #[derive(Debug)]
> pub struct Drain<'a, T: 'a> {
> idx: usize,
> - map: *mut HeaderMap<T>,
> + len: usize,
> + entries: *mut [Bucket<T>],
> + extra_values: *mut Vec<ExtraValue<T>>,
> lt: PhantomData<&'a mut HeaderMap<T>>,
> }
>
> @@ -202,9 +204,8 @@
> /// An drain iterator of all values associated with a single header name.
> #[derive(Debug)]
> pub struct ValueDrain<'a, T: 'a> {
> - map: *mut HeaderMap<T>,
> first: Option<T>,
> - next: Option<usize>,
> + next: Option<::std::vec::IntoIter<T>>,
> lt: PhantomData<&'a mut HeaderMap<T>>,
> }
>
> @@ -270,6 +271,13 @@
> tail: usize,
> }
>
> +/// Access to the `links` value in a slice of buckets.
> +///
> +/// It's important that no other field is accessed, since it may have been
> +/// freed in a `Drain` iterator.
> +#[derive(Debug)]
> +struct RawLinks<T>(*mut [Bucket<T>]);
> +
> /// Node in doubly-linked list of header value entries
> #[derive(Debug, Clone)]
> struct ExtraValue<T> {
> @@ -628,6 +636,8 @@
>
> if cap > self.indices.len() {
> let cap = cap.next_power_of_two();
> + assert!(cap < MAX_SIZE, "header map reserve over max capacity");
> + assert!(cap != 0, "header map reserve overflowed");
>
> if self.entries.len() == 0 {
> self.mask = cap - 1;
> @@ -930,9 +940,23 @@
> *i = Pos::none();
> }
>
> + // Memory safety
> + //
> + // When the Drain is first created, it shortens the length of
> + // the source vector to make sure no uninitialized or moved-from
> + // elements are accessible at all if the Drain's destructor never
> + // gets to run.
> +
> + let entries = &mut self.entries[..] as *mut _;
> + let extra_values = &mut self.extra_values as *mut _;
> + let len = self.entries.len();
> + unsafe { self.entries.set_len(0); }
> +
> Drain {
> idx: 0,
> - map: self as *mut _,
> + len,
> + entries,
> + extra_values,
> lt: PhantomData,
> }
> }
> @@ -1136,10 +1160,17 @@
> links = entry.links.take();
> }
>
> + let raw_links = self.raw_links();
> + let extra_values = &mut self.extra_values;
> +
> + let next = links.map(|l| {
> + drain_all_extra_values(raw_links, extra_values, l.next)
> + .into_iter()
> + });
> +
> ValueDrain {
> - map: self as *mut _,
> first: Some(old),
> - next: links.map(|l| l.next),
> + next: next,
> lt: PhantomData,
> }
> }
> @@ -1364,124 +1395,8 @@
> /// Removes the `ExtraValue` at the given index.
> #[inline]
> fn remove_extra_value(&mut self, idx: usize) -> ExtraValue<T> {
> - let prev;
> - let next;
> -
> - {
> - debug_assert!(self.extra_values.len() > idx);
> - let extra = &self.extra_values[idx];
> - prev = extra.prev;
> - next = extra.next;
> - }
> -
> - // First unlink the extra value
> - match (prev, next) {
> - (Link::Entry(prev), Link::Entry(next)) => {
> - debug_assert_eq!(prev, next);
> - debug_assert!(self.entries.len() > prev);
> -
> - self.entries[prev].links = None;
> - }
> - (Link::Entry(prev), Link::Extra(next)) => {
> - debug_assert!(self.entries.len() > prev);
> - debug_assert!(self.entries[prev].links.is_some());
> -
> - self.entries[prev].links.as_mut().unwrap()
> - .next = next;
> -
> - debug_assert!(self.extra_values.len() > next);
> - self.extra_values[next].prev = Link::Entry(prev);
> - }
> - (Link::Extra(prev), Link::Entry(next)) => {
> - debug_assert!(self.entries.len() > next);
> - debug_assert!(self.entries[next].links.is_some());
> -
> - self.entries[next].links.as_mut().unwrap()
> - .tail = prev;
> -
> - debug_assert!(self.extra_values.len() > prev);
> - self.extra_values[prev].next = Link::Entry(next);
> - }
> - (Link::Extra(prev), Link::Extra(next)) => {
> - debug_assert!(self.extra_values.len() > next);
> - debug_assert!(self.extra_values.len() > prev);
> -
> - self.extra_values[prev].next = Link::Extra(next);
> - self.extra_values[next].prev = Link::Extra(prev);
> - }
> - }
> -
> - // Remove the extra value
> - let mut extra = self.extra_values.swap_remove(idx);
> -
> - // This is the index of the value that was moved (possibly `extra`)
> - let old_idx = self.extra_values.len();
> -
> - // Update the links
> - if extra.prev == Link::Extra(old_idx) {
> - extra.prev = Link::Extra(idx);
> - }
> -
> - if extra.next == Link::Extra(old_idx) {
> - extra.next = Link::Extra(idx);
> - }
> -
> - // Check if another entry was displaced. If it was, then the links
> - // need to be fixed.
> - if idx != old_idx {
> - let next;
> - let prev;
> -
> - {
> - debug_assert!(self.extra_values.len() > idx);
> - let moved = &self.extra_values[idx];
> - next = moved.next;
> - prev = moved.prev;
> - }
> -
> - // An entry was moved, we have to the links
> - match prev {
> - Link::Entry(entry_idx) => {
> - // It is critical that we do not attempt to read the
> - // header name or value as that memory may have been
> - // "released" already.
> - debug_assert!(self.entries.len() > entry_idx);
> - debug_assert!(self.entries[entry_idx].links.is_some());
> -
> - let links = self.entries[entry_idx].links.as_mut().unwrap();
> - links.next = idx;
> - }
> - Link::Extra(extra_idx) => {
> - debug_assert!(self.extra_values.len() > extra_idx);
> - self.extra_values[extra_idx].next = Link::Extra(idx);
> - }
> - }
> -
> - match next {
> - Link::Entry(entry_idx) => {
> - debug_assert!(self.entries.len() > entry_idx);
> - debug_assert!(self.entries[entry_idx].links.is_some());
> -
> - let links = self.entries[entry_idx].links.as_mut().unwrap();
> - links.tail = idx;
> - }
> - Link::Extra(extra_idx) => {
> - debug_assert!(self.extra_values.len() > extra_idx);
> - self.extra_values[extra_idx].prev = Link::Extra(idx);
> - }
> - }
> - }
> -
> - debug_assert!({
> - for v in &self.extra_values {
> - assert!(v.next != Link::Extra(old_idx));
> - assert!(v.prev != Link::Extra(old_idx));
> - }
> -
> - true
> - });
> -
> - extra
> + let raw_links = self.raw_links();
> + remove_extra_value(raw_links, &mut self.extra_values, idx)
> }
>
> fn remove_all_extra_values(&mut self, mut head: usize) {
> @@ -1631,6 +1546,145 @@
> let more = self.capacity() - self.entries.len();
> self.entries.reserve_exact(more);
> }
> +
> + #[inline]
> + fn raw_links(&mut self) -> RawLinks<T> {
> + RawLinks(&mut self.entries[..] as *mut _)
> + }
> +}
> +
> +/// Removes the `ExtraValue` at the given index.
> +#[inline]
> +fn remove_extra_value<T>(mut raw_links: RawLinks<T>, extra_values: &mut Vec<ExtraValue<T>>, idx: usize) -> ExtraValue<T> {
> + let prev;
> + let next;
> +
> + {
> + debug_assert!(extra_values.len() > idx);
> + let extra = &extra_values[idx];
> + prev = extra.prev;
> + next = extra.next;
> + }
> +
> + // First unlink the extra value
> + match (prev, next) {
> + (Link::Entry(prev), Link::Entry(next)) => {
> + debug_assert_eq!(prev, next);
> +
> + raw_links[prev] = None;
> + }
> + (Link::Entry(prev), Link::Extra(next)) => {
> + debug_assert!(raw_links[prev].is_some());
> +
> + raw_links[prev].as_mut().unwrap()
> + .next = next;
> +
> + debug_assert!(extra_values.len() > next);
> + extra_values[next].prev = Link::Entry(prev);
> + }
> + (Link::Extra(prev), Link::Entry(next)) => {
> + debug_assert!(raw_links[next].is_some());
> +
> + raw_links[next].as_mut().unwrap()
> + .tail = prev;
> +
> + debug_assert!(extra_values.len() > prev);
> + extra_values[prev].next = Link::Entry(next);
> + }
> + (Link::Extra(prev), Link::Extra(next)) => {
> + debug_assert!(extra_values.len() > next);
> + debug_assert!(extra_values.len() > prev);
> +
> + extra_values[prev].next = Link::Extra(next);
> + extra_values[next].prev = Link::Extra(prev);
> + }
> + }
> +
> + // Remove the extra value
> + let mut extra = extra_values.swap_remove(idx);
> +
> + // This is the index of the value that was moved (possibly `extra`)
> + let old_idx = extra_values.len();
> +
> + // Update the links
> + if extra.prev == Link::Extra(old_idx) {
> + extra.prev = Link::Extra(idx);
> + }
> +
> + if extra.next == Link::Extra(old_idx) {
> + extra.next = Link::Extra(idx);
> + }
> +
> + // Check if another entry was displaced. If it was, then the links
> + // need to be fixed.
> + if idx != old_idx {
> + let next;
> + let prev;
> +
> + {
> + debug_assert!(extra_values.len() > idx);
> + let moved = &extra_values[idx];
> + next = moved.next;
> + prev = moved.prev;
> + }
> +
> + // An entry was moved, we have to the links
> + match prev {
> + Link::Entry(entry_idx) => {
> + // It is critical that we do not attempt to read the
> + // header name or value as that memory may have been
> + // "released" already.
> + debug_assert!(raw_links[entry_idx].is_some());
> +
> + let links = raw_links[entry_idx].as_mut().unwrap();
> + links.next = idx;
> + }
> + Link::Extra(extra_idx) => {
> + debug_assert!(extra_values.len() > extra_idx);
> + extra_values[extra_idx].next = Link::Extra(idx);
> + }
> + }
> +
> + match next {
> + Link::Entry(entry_idx) => {
> + debug_assert!(raw_links[entry_idx].is_some());
> +
> + let links = raw_links[entry_idx].as_mut().unwrap();
> + links.tail = idx;
> + }
> + Link::Extra(extra_idx) => {
> + debug_assert!(extra_values.len() > extra_idx);
> + extra_values[extra_idx].prev = Link::Extra(idx);
> + }
> + }
> + }
> +
> + debug_assert!({
> + for v in &*extra_values {
> + assert!(v.next != Link::Extra(old_idx));
> + assert!(v.prev != Link::Extra(old_idx));
> + }
> +
> + true
> + });
> +
> + extra
> +}
> +
> +
> +fn drain_all_extra_values<T>(raw_links: RawLinks<T>, extra_values: &mut Vec<ExtraValue<T>>, mut head: usize) -> Vec<T> {
> + let mut vec = Vec::new();
> + loop {
> + let extra = remove_extra_value(raw_links, extra_values, head);
> + vec.push(extra.value);
> +
> + if let Link::Extra(idx) = extra.next {
> + head = idx;
> + } else {
> + break;
> + }
> + }
> + vec
> }
>
> impl<'a, T> IntoIterator for &'a HeaderMap<T> {
> @@ -1821,7 +1875,6 @@
>
> // As long as `HeaderName` is none, keep inserting the value into
> // the current entry
> - 'inner:
> loop {
> match iter.next() {
> Some((Some(k), v)) => {
> @@ -2102,7 +2155,7 @@
> fn next(&mut self) -> Option<Self::Item> {
> let idx = self.idx;
>
> - if idx == unsafe { (*self.map).entries.len() } {
> + if idx == self.len {
> return None;
> }
>
> @@ -2112,38 +2165,39 @@
> let value;
> let next;
>
> - unsafe {
> - let entry = &(*self.map).entries[idx];
> + let values = unsafe {
> + let entry = &(*self.entries)[idx];
>
> // Read the header name
> key = ptr::read(&entry.key as *const _);
> value = ptr::read(&entry.value as *const _);
> - next = entry.links.map(|l| l.next);
> - };
>
> - let values = ValueDrain {
> - map: self.map,
> - first: Some(value),
> - next: next,
> - lt: PhantomData,
> + let raw_links = RawLinks(self.entries);
> + let extra_values = &mut *self.extra_values;
> + next = entry.links.map(|l| {
> + drain_all_extra_values(raw_links, extra_values, l.next)
> + .into_iter()
> + });
> +
> + ValueDrain {
> + first: Some(value),
> + next,
> + lt: PhantomData,
> + }
> };
>
> Some((key, values))
> }
>
> fn size_hint(&self) -> (usize, Option<usize>) {
> - let lower = unsafe { (*self.map).entries.len() } - self.idx;
> + let lower = self.len - self.idx;
> (lower, Some(lower))
> }
> }
>
> impl<'a, T> Drop for Drain<'a, T> {
> fn drop(&mut self) {
> - unsafe {
> - let map = &mut *self.map;
> - debug_assert!(map.extra_values.is_empty());
> - map.entries.set_len(0);
> - }
> + for _ in self {}
> }
> }
>
> @@ -2860,10 +2914,16 @@
> /// returned.
> pub fn remove_entry_mult(self) -> (HeaderName, ValueDrain<'a, T>) {
> let entry = self.map.remove_found(self.probe, self.index);
> + let raw_links = self.map.raw_links();
> + let extra_values = &mut self.map.extra_values;
> +
> + let next = entry.links.map(|l| {
> + drain_all_extra_values(raw_links, extra_values, l.next)
> + .into_iter()
> + });
> let drain = ValueDrain {
> - map: self.map as *mut _,
> first: Some(entry.value),
> - next: entry.links.map(|l| l.next),
> + next,
> lt: PhantomData,
> };
> (entry.key, drain)
> @@ -2956,29 +3016,26 @@
> fn next(&mut self) -> Option<T> {
> if self.first.is_some() {
> self.first.take()
> - } else if let Some(next) = self.next {
> - // Remove the extra value
> - let extra = unsafe { &mut (*self.map) }.remove_extra_value(next);
> -
> - match extra.next {
> - Link::Extra(idx) => self.next = Some(idx),
> - Link::Entry(_) => self.next = None,
> - }
> -
> - Some(extra.value)
> + } else if let Some(ref mut extras) = self.next {
> + extras.next()
> } else {
> None
> }
> }
>
> fn size_hint(&self) -> (usize, Option<usize>) {
> - match (&self.first, self.next) {
> + match (&self.first, &self.next) {
> // Exactly 1
> - (&Some(_), None) => (1, Some(1)),
> - // At least 1
> - (&_, Some(_)) => (1, None),
> + (&Some(_), &None) => (1, Some(1)),
> + // 1 + extras
> + (&Some(_), &Some(ref extras)) => {
> + let (l, u) = extras.size_hint();
> + (l + 1, u.map(|u| u + 1))
> + },
> + // Extras only
> + (&None, &Some(ref extras)) => extras.size_hint(),
> // No more
> - (&None, None) => (0, Some(0)),
> + (&None, &None) => (0, Some(0)),
> }
> }
> }
> @@ -2993,6 +3050,34 @@
> unsafe impl<'a, T: Sync> Sync for ValueDrain<'a, T> {}
> unsafe impl<'a, T: Send> Send for ValueDrain<'a, T> {}
>
> +// ===== impl RawLinks =====
> +
> +impl<T> Clone for RawLinks<T> {
> + fn clone(&self) -> RawLinks<T> {
> + *self
> + }
> +}
> +
> +impl<T> Copy for RawLinks<T> {}
> +
> +impl<T> ops::Index<usize> for RawLinks<T> {
> + type Output = Option<Links>;
> +
> + fn index(&self, idx: usize) -> &Self::Output {
> + unsafe {
> + &(*self.0)[idx].links
> + }
> + }
> +}
> +
> +impl<T> ops::IndexMut<usize> for RawLinks<T> {
> + fn index_mut(&mut self, idx: usize) -> &mut Self::Output {
> + unsafe {
> + &mut (*self.0)[idx].links
> + }
> + }
> +}
> +
> // ===== impl Pos =====
>
> impl Pos {
> @@ -3198,7 +3283,7 @@
> use super::{Entry, HdrName, HeaderMap, HeaderName, InvalidHeaderName};
>
> /// A marker trait used to identify values that can be used as search keys
> - /// to a `HeaderMap`.
> + /// to a `HeaderMap`.
> pub trait AsHeaderName: Sealed {}
>
> // All methods are on this pub(super) trait, instead of `AsHeaderName`,
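
The core of the map.rs change is visible above: Drain no longer carries a *mut HeaderMap but raw pointers to the entries slice and extra values plus a saved length, it truncates the source map up front, and its Drop impl simply finishes the iteration. A stripped-down sketch of that idiom over a plain Vec (not the crate's actual types; all names here are made up for illustration):

use std::marker::PhantomData;
use std::ptr;

struct Drain<'a, T> {
    idx: usize,
    len: usize,
    ptr: *mut T,
    _lt: PhantomData<&'a mut Vec<T>>,
}

fn drain_all<T>(v: &mut Vec<T>) -> Drain<'_, T> {
    let len = v.len();
    let ptr = v.as_mut_ptr();
    // Shorten the source first: if the Drain is mem::forgotten, the
    // remaining elements are merely leaked, never dropped twice.
    unsafe { v.set_len(0) };
    Drain { idx: 0, len, ptr, _lt: PhantomData }
}

impl<'a, T> Iterator for Drain<'a, T> {
    type Item = T;
    fn next(&mut self) -> Option<T> {
        if self.idx == self.len {
            return None;
        }
        // Move the element out of the (now length-0) buffer.
        let item = unsafe { ptr::read(self.ptr.add(self.idx)) };
        self.idx += 1;
        Some(item)
    }
}

impl<'a, T> Drop for Drain<'a, T> {
    fn drop(&mut self) {
        // Drop whatever was not yielded, same as the patched Drain.
        for _ in self {}
    }
}

fn main() {
    let mut v = vec!["a".to_string(), "b".to_string(), "c".to_string()];
    let mut d = drain_all(&mut v);
    assert_eq!(d.next().as_deref(), Some("a"));
    std::mem::forget(d); // leaks "b" and "c", but cannot double-free
    assert!(v.is_empty()); // the source was emptied up front
}
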
> diff -Nru rust-http-0.1.19/src/lib.rs rust-http-0.1.21/src/lib.rs
> --- rust-http-0.1.19/src/lib.rs 2019-10-15 20:44:22.000000000 +0200
> +++ rust-http-0.1.21/src/lib.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -1,4 +1,4 @@
> -#![doc(html_root_url = "https://docs.rs/http/0.1.19")]
> +#![doc(html_root_url = "https://docs.rs/http/0.1.21")]
>
> //! A general purpose library of common HTTP types
> //!
> diff -Nru rust-http-0.1.19/src/method.rs rust-http-0.1.21/src/method.rs
> --- rust-http-0.1.19/src/method.rs 2019-10-15 20:43:45.000000000 +0200
> +++ rust-http-0.1.21/src/method.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -219,13 +219,9 @@
> /// See [the spec](https://tools.ietf.org/html/rfc7231#section-4.2.2) for
> /// more words.
> pub fn is_idempotent(&self) -> bool {
> - if self.is_safe() {
> - true
> - } else {
> - match self.0 {
> - Put | Delete => true,
> - _ => false
> - }
> + match self.0 {
> + Put | Delete => true,
> + _ => self.is_safe(),
> }
> }
>
> @@ -427,3 +423,17 @@
> assert!(Method::from_str("").is_err());
> assert!(Method::from_bytes(b"").is_err());
> }
> +
> +#[test]
> +fn test_is_idempotent() {
> + assert!(Method::OPTIONS.is_idempotent());
> + assert!(Method::GET.is_idempotent());
> + assert!(Method::PUT.is_idempotent());
> + assert!(Method::DELETE.is_idempotent());
> + assert!(Method::HEAD.is_idempotent());
> + assert!(Method::TRACE.is_idempotent());
> +
> + assert!(!Method::POST.is_idempotent());
> + assert!(!Method::CONNECT.is_idempotent());
> + assert!(!Method::PATCH.is_idempotent());
> +}
> diff -Nru rust-http-0.1.19/src/uri/scheme.rs rust-http-0.1.21/src/uri/scheme.rs
> --- rust-http-0.1.19/src/uri/scheme.rs 2019-07-26 19:05:29.000000000 +0200
> +++ rust-http-0.1.21/src/uri/scheme.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -336,10 +336,6 @@
> for i in 0..s.len() {
> let b = s[i];
>
> - if i == MAX_SCHEME_LEN {
> - return Err(ErrorKind::SchemeTooLong.into());
> - }
> -
> match SCHEME_CHARS[b as usize] {
> b':' => {
> // Not enough data remaining
> @@ -352,6 +348,10 @@
> break;
> }
>
> + if i > MAX_SCHEME_LEN {
> + return Err(ErrorKind::SchemeTooLong.into());
> + }
> +
> // Return scheme
> return Ok(Scheme2::Other(i));
> }
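
With the length check moved inside the branch that has actually found a scheme terminator, the 64-byte scheme limit no longer rejects scheme-less URIs that merely have a long host, which is what the "parsing long authority-form Uris (#351)" entry in the 0.1.20 changelog refers to. A hedged usage sketch against the 0.1.x Uri accessors, mirroring the new test cases below:

use http::Uri;

fn main() {
    // A long host in authority form (no scheme) must parse; previously the
    // scheme-length check could fire before any scheme was identified.
    let long = "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234";
    let uri: Uri = long.parse().expect("authority-form URI should parse");
    assert_eq!(uri.authority_part().map(|a| a.as_str()), Some(long));
}
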
> diff -Nru rust-http-0.1.19/src/uri/tests.rs rust-http-0.1.21/src/uri/tests.rs
> --- rust-http-0.1.19/src/uri/tests.rs 2019-07-26 19:05:29.000000000 +0200
> +++ rust-http-0.1.21/src/uri/tests.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -232,6 +232,30 @@
> }
>
> test_parse! {
> + test_uri_parse_long_host_with_no_scheme,
> + "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost",
> + [],
> +
> + scheme_part = None,
> + authority_part = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost"),
> + path = "",
> + query = None,
> + port_part = None,
> +}
> +
> +test_parse! {
> + test_uri_parse_long_host_with_port_and_no_scheme,
> + "thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234",
> + [],
> +
> + scheme_part = None,
> + authority_part = part!("thequickbrownfoxjumpedoverthelazydogtofindthelargedangerousdragon.localhost:1234"),
> + path = "",
> + query = None,
> + port_part = Port::from_str("1234").ok(),
> +}
> +
> +test_parse! {
> test_userinfo1,
> "http://a:b@127.0.0.1:1234/",
> [],
> @@ -430,7 +454,7 @@
> }
>
> #[test]
> -fn test_long_scheme() {
> +fn test_overflowing_scheme() {
> let mut uri = vec![];
> uri.extend(vec![b'a'; 256]);
> uri.extend(b"://localhost/");
> @@ -442,6 +466,18 @@
> }
>
> #[test]
> +fn test_max_length_scheme() {
> + let mut uri = vec![];
> + uri.extend(vec![b'a'; 64]);
> + uri.extend(b"://localhost/");
> +
> + let uri = String::from_utf8(uri).unwrap();
> + let uri: Uri = uri.parse().unwrap();
> +
> + assert_eq!(uri.scheme_str().unwrap().len(), 64);
> +}
> +
> +#[test]
> fn test_uri_to_path_and_query() {
> let cases = vec![
> ("/", "/"),
> diff -Nru rust-http-0.1.19/tests/header_map.rs rust-http-0.1.21/tests/header_map.rs
> --- rust-http-0.1.19/tests/header_map.rs 2019-07-26 19:05:29.000000000 +0200
> +++ rust-http-0.1.21/tests/header_map.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -38,6 +38,22 @@
> }
>
> #[test]
> +#[should_panic]
> +fn reserve_over_capacity() {
> + // See https://github.com/hyperium/http/issues/352
> + let mut headers = HeaderMap::<u32>::with_capacity(32);
> + headers.reserve(50_000); // over MAX_SIZE
> +}
> +
> +#[test]
> +#[should_panic]
> +fn reserve_overflow() {
> + // See https://github.com/hyperium/http/issues/352
> + let mut headers = HeaderMap::<u32>::with_capacity(0);
> + headers.reserve(std::usize::MAX); // next_power_of_two overflows
> +}
> +
> +#[test]
> fn drain() {
> let mut headers = HeaderMap::new();
>
> @@ -87,6 +103,40 @@
> }
>
> #[test]
> +fn drain_drop_immediately() {
> + // test mem::forgetting does not double-free
> +
> + let mut headers = HeaderMap::new();
> + headers.insert("hello", "world".parse().unwrap());
> + headers.insert("zomg", "bar".parse().unwrap());
> + headers.append("hello", "world2".parse().unwrap());
> +
> + let iter = headers.drain();
> + assert_eq!(iter.size_hint(), (2, Some(2)));
> + // not consuming `iter`
> +}
> +
> +#[test]
> +fn drain_forget() {
> + // test mem::forgetting does not double-free
> +
> + let mut headers = HeaderMap::<HeaderValue>::new();
> + headers.insert("hello", "world".parse().unwrap());
> + headers.insert("zomg", "bar".parse().unwrap());
> +
> + assert_eq!(headers.len(), 2);
> +
> + {
> + let mut iter = headers.drain();
> + assert_eq!(iter.size_hint(), (2, Some(2)));
> + let _ = iter.next().unwrap();
> + std::mem::forget(iter);
> + }
> +
> + assert_eq!(headers.len(), 0);
> +}
> +
> +#[test]
> fn drain_entry() {
> let mut headers = HeaderMap::new();
>
> diff -Nru rust-http-0.1.19/util/Cargo.toml rust-http-0.1.21/util/Cargo.toml
> --- rust-http-0.1.19/util/Cargo.toml 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/util/Cargo.toml 2019-12-02 20:18:55.000000000 +0100
> @@ -0,0 +1,4 @@
> +[package]
> +name = "gen"
> +version = "0.1.0"
> +authors = ["Carl Lerche <me at carllerche.com>"]
> diff -Nru rust-http-0.1.19/util/README.md rust-http-0.1.21/util/README.md
> --- rust-http-0.1.19/util/README.md 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/util/README.md 2019-12-02 20:18:55.000000000 +0100
> @@ -0,0 +1 @@
> +Generates standard header code
> diff -Nru rust-http-0.1.19/util/src/main.rs rust-http-0.1.21/util/src/main.rs
> --- rust-http-0.1.19/util/src/main.rs 1970-01-01 01:00:00.000000000 +0100
> +++ rust-http-0.1.21/util/src/main.rs 2019-12-02 20:18:55.000000000 +0100
> @@ -0,0 +1,1040 @@
> +macro_rules! standard_headers {
> + (
> + $(
> + $doc:expr,
> + $name:expr;
> + )+
> + ) => {
> + const HEADERS: &[(&'static str, &'static str)] = &[
> + $(
> + ($doc, $name),
> + )+
> + ];
> + }
> +}
> +
> +standard_headers! {
> + r#"
> + /// Advertises which content types the client is able to understand.
> + ///
> + /// The Accept request HTTP header advertises which content types, expressed
> + /// as MIME types, the client is able to understand. Using content
> + /// negotiation, the server then selects one of the proposals, uses it and
> + /// informs the client of its choice with the Content-Type response header.
> + /// Browsers set adequate values for this header depending of the context
> + /// where the request is done: when fetching a CSS stylesheet a different
> + /// value is set for the request than when fetching an image, video or a
> + /// script.
> + "#,
> + "accept";
> +
> + r#"
> + /// Advertises which character set the client is able to understand.
> + ///
> + /// The Accept-Charset request HTTP header advertises which character set
> + /// the client is able to understand. Using content negotiation, the server
> + /// then selects one of the proposals, uses it and informs the client of its
> + /// choice within the Content-Type response header. Browsers usually don't
> + /// set this header as the default value for each content type is usually
> + /// correct and transmitting it would allow easier fingerprinting.
> + ///
> + /// If the server cannot serve any matching character set, it can
> + /// theoretically send back a 406 (Not Acceptable) error code. But, for a
> + /// better user experience, this is rarely done and the more common way is
> + /// to ignore the Accept-Charset header in this case.
> + "#,
> + "accept-charset";
> +
> + r#"
> + /// Advertises which content encoding the client is able to understand.
> + ///
> + /// The Accept-Encoding request HTTP header advertises which content
> + /// encoding, usually a compression algorithm, the client is able to
> + /// understand. Using content negotiation, the server selects one of the
> + /// proposals, uses it and informs the client of its choice with the
> + /// Content-Encoding response header.
> + ///
> + /// Even if both the client and the server supports the same compression
> + /// algorithms, the server may choose not to compress the body of a
> + /// response, if the identity value is also acceptable. Two common cases
> + /// lead to this:
> + ///
> + /// * The data to be sent is already compressed and a second compression
> + /// won't lead to smaller data to be transmitted. This may the case with
> + /// some image formats;
> + ///
> + /// * The server is overloaded and cannot afford the computational overhead
> + /// induced by the compression requirement. Typically, Microsoft recommends
> + /// not to compress if a server use more than 80 % of its computational
> + /// power.
> + ///
> + /// As long as the identity value, meaning no encryption, is not explicitly
> + /// forbidden, by an identity;q=0 or a *;q=0 without another explicitly set
> + /// value for identity, the server must never send back a 406 Not Acceptable
> + /// error.
> + "#,
> + "accept-encoding";
> +
> + r#"
> + /// Advertises which languages the client is able to understand.
> + ///
> + /// The Accept-Language request HTTP header advertises which languages the
> + /// client is able to understand, and which locale variant is preferred.
> + /// Using content negotiation, the server then selects one of the proposals,
> + /// uses it and informs the client of its choice with the Content-Language
> + /// response header. Browsers set adequate values for this header according
> + /// their user interface language and even if a user can change it, this
> + /// happens rarely (and is frown upon as it leads to fingerprinting).
> + ///
> + /// This header is a hint to be used when the server has no way of
> + /// determining the language via another way, like a specific URL, that is
> + /// controlled by an explicit user decision. It is recommended that the
> + /// server never overrides an explicit decision. The content of the
> + /// Accept-Language is often out of the control of the user (like when
> + /// traveling and using an Internet Cafe in a different country); the user
> + /// may also want to visit a page in another language than the locale of
> + /// their user interface.
> + ///
> + /// If the server cannot serve any matching language, it can theoretically
> + /// send back a 406 (Not Acceptable) error code. But, for a better user
> + /// experience, this is rarely done and more common way is to ignore the
> + /// Accept-Language header in this case.
> + "#,
> + "accept-language";
> +
> + r#"
> + /// Advertises which patch formats the server is able to understand.
> + ///
> + /// Accept-Patch should appear in the OPTIONS response for any resource that
> + /// supports the use of the PATCH method. The presence of the
> + /// Accept-Patch header in response to any method is an implicit indication
> + /// that PATCH is allowed on the resource identified by the URI. The
> + /// presence of a specific patch document format in this header indicates
> + /// that that specific format is allowed on the resource identified by the
> + /// URI.
> + "#,
> + "accept-patch";
> +
> + r#"
> + /// Marker used by the server to advertise partial request support.
> + ///
> + /// The Accept-Ranges response HTTP header is a marker used by the server to
> + /// advertise its support of partial requests. The value of this field
> + /// indicates the unit that can be used to define a range.
> + ///
> + /// In presence of an Accept-Ranges header, the browser may try to resume an
> + /// interrupted download, rather than to start it from the start again.
> + "#,
> + "accept-ranges";
> +
> + r#"
> + /// Preflight response indicating if the response to the request can be
> + /// exposed to the page.
> + ///
> + /// The Access-Control-Allow-Credentials response header indicates whether
> + /// or not the response to the request can be exposed to the page. It can be
> + /// exposed when the true value is returned; it can't in other cases.
> + ///
> + /// Credentials are cookies, authorization headers or TLS client
> + /// certificates.
> + ///
> + /// When used as part of a response to a preflight request, this indicates
> + /// whether or not the actual request can be made using credentials. Note
> + /// that simple GET requests are not preflighted, and so if a request is
> + /// made for a resource with credentials, if this header is not returned
> + /// with the resource, the response is ignored by the browser and not
> + /// returned to web content.
> + ///
> + /// The Access-Control-Allow-Credentials header works in conjunction with
> + /// the XMLHttpRequest.withCredentials property or with the credentials
> + /// option in the Request() constructor of the Fetch API. Credentials must
> + /// be set on both sides (the Access-Control-Allow-Credentials header and in
> + /// the XHR or Fetch request) in order for the CORS request with credentials
> + /// to succeed.
> + "#,
> + "access-control-allow-credentials";
> +
> + r#"
> + /// Preflight response indicating permitted HTTP headers.
> + ///
> + /// The Access-Control-Allow-Headers response header is used in response to
> + /// a preflight request to indicate which HTTP headers will be available via
> + /// Access-Control-Expose-Headers when making the actual request.
> + ///
> + /// The simple headers, Accept, Accept-Language, Content-Language,
> + /// Content-Type (but only with a MIME type of its parsed value (ignoring
> + /// parameters) of either application/x-www-form-urlencoded,
> + /// multipart/form-data, or text/plain), are always available and don't need
> + /// to be listed by this header.
> + ///
> + /// This header is required if the request has an
> + /// Access-Control-Request-Headers header.
> + "#,
> + "access-control-allow-headers";
> +
> + r#"
> + /// Preflight header response indicating permitted access methods.
> + ///
> + /// The Access-Control-Allow-Methods response header specifies the method or
> + /// methods allowed when accessing the resource in response to a preflight
> + /// request.
> + "#,
> + "access-control-allow-methods";
> +
> +
> + r#"
> + /// Indicates whether the response can be shared with resources with the
> + /// given origin.
> + "#,
> + "access-control-allow-origin";
> +
> + r#"
> + /// Indicates which headers can be exposed as part of the response by
> + /// listing their names.
> + "#,
> + "access-control-expose-headers";
> +
> + r#"
> + /// Indicates how long the results of a preflight request can be cached.
> + "#,
> + "access-control-max-age";
> +
> + r#"
> + /// Informs the server which HTTP headers will be used when an actual
> + /// request is made.
> + "#,
> + "access-control-request-headers";
> +
> + r#"
> + /// Informs the server know which HTTP method will be used when the actual
> + /// request is made.
> + "#,
> + "access-control-request-method";
> +
> + r#"
> + /// Indicates the time in seconds the object has been in a proxy cache.
> + ///
> + /// The Age header is usually close to zero. If it is Age: 0, it was
> + /// probably just fetched from the origin server; otherwise It is usually
> + /// calculated as a difference between the proxy's current date and the Date
> + /// general header included in the HTTP response.
> + "#,
> + "age";
> +
> + r#"
> + /// Lists the set of methods support by a resource.
> + ///
> + /// This header must be sent if the server responds with a 405 Method Not
> + /// Allowed status code to indicate which request methods can be used. An
> + /// empty Allow header indicates that the resource allows no request
> + /// methods, which might occur temporarily for a given resource, for
> + /// example.
> + "#,
> + "allow";
> +
> + r#"
> + /// Advertises the availability of alternate services to clients.
> + "#,
> + "alt-svc";
> +
> + r#"
> + /// Contains the credentials to authenticate a user agent with a server.
> + ///
> + /// Usually this header is included after the server has responded with a
> + /// 401 Unauthorized status and the WWW-Authenticate header.
> + "#,
> + "authorization";
> +
> + r#"
> + /// Specifies directives for caching mechanisms in both requests and
> + /// responses.
> + ///
> + /// Caching directives are unidirectional, meaning that a given directive in
> + /// a request is not implying that the same directive is to be given in the
> + /// response.
> + "#,
> + "cache-control";
> +
> + r#"
> + /// Controls whether or not the network connection stays open after the
> + /// current transaction finishes.
> + ///
> + /// If the value sent is keep-alive, the connection is persistent and not
> + /// closed, allowing for subsequent requests to the same server to be done.
> + ///
> + /// Except for the standard hop-by-hop headers (Keep-Alive,
> + /// Transfer-Encoding, TE, Connection, Trailer, Upgrade, Proxy-Authorization
> + /// and Proxy-Authenticate), any hop-by-hop headers used by the message must
> + /// be listed in the Connection header, so that the first proxy knows he has
> + /// to consume them and not to forward them further. Standard hop-by-hop
> + /// headers can be listed too (it is often the case of Keep-Alive, but this
> + /// is not mandatory.
> + "#,
> + "connection";
> +
> + r#"
> + /// Indicates if the content is expected to be displayed inline.
> + ///
> + /// In a regular HTTP response, the Content-Disposition response header is a
> + /// header indicating if the content is expected to be displayed inline in
> + /// the browser, that is, as a Web page or as part of a Web page, or as an
> + /// attachment, that is downloaded and saved locally.
> + ///
> + /// In a multipart/form-data body, the HTTP Content-Disposition general
> + /// header is a header that can be used on the subpart of a multipart body
> + /// to give information about the field it applies to. The subpart is
> + /// delimited by the boundary defined in the Content-Type header. Used on
> + /// the body itself, Content-Disposition has no effect.
> + ///
> + /// The Content-Disposition header is defined in the larger context of MIME
> + /// messages for e-mail, but only a subset of the possible parameters apply
> + /// to HTTP forms and POST requests. Only the value form-data, as well as
> + /// the optional directive name and filename, can be used in the HTTP
> + /// context.
> + "#,
> + "content-disposition";
> +
> + r#"
> + /// Used to indicate the content encoding applied to the media type.
> + ///
> + /// When present, its value indicates what additional content encoding has
> + /// been applied to the entity-body. It lets the client know how to decode
> + /// in order to obtain the media-type referenced by the Content-Type header.
> + ///
> + /// It is recommended to compress data as much as possible and therefore to
> + /// use this field, but some types of resources, like jpeg images, are
> + /// already compressed. Sometimes using additional compression doesn't
> + /// reduce payload size and can even make the payload longer.
> + "#,
> + "content-encoding";
> +
> + r#"
> + /// Used to describe the languages intended for the audience.
> + ///
> + /// This header allows users to differentiate according to their own
> + /// preferred language. For example, if "Content-Language: de-DE" is set, it
> + /// says that the document is intended for German language speakers
> + /// (however, it doesn't indicate the document is written in German. For
> + /// example, it might be written in English as part of a language course for
> + /// German speakers).
> + ///
> + /// If no Content-Language is specified, the default is that the content is
> + /// intended for all language audiences. Multiple language tags are also
> + /// possible, as well as applying the Content-Language header to various
> + /// media types and not only to textual documents.
> + "#,
> + "content-language";
> +
> + r#"
> + /// Indicates the size of the entity-body.
> + ///
> + /// The header value must be a decimal indicating the number of octets sent
> + /// to the recipient.
> + "#,
> + "content-length";
> +
> + r#"
> + /// Indicates an alternate location for the returned data.
> + ///
> + /// The principal use case is to indicate the URL of the resource
> + /// transmitted as the result of content negotiation.
> + ///
> + /// Location and Content-Location are different: Location indicates the
> + /// target of a redirection (or the URL of a newly created document), while
> + /// Content-Location indicates the direct URL to use to access the resource,
> + /// without the need of further content negotiation. Location is a header
> + /// associated with the response, while Content-Location is associated with
> + /// the entity returned.
> + "#,
> + "content-location";
> +
> + r#"
> + /// Contains the MD5 digest of the entity-body.
> + ///
> + /// The Content-MD5 entity-header field, as defined in RFC 1864, is an
> + /// MD5 digest of the entity-body for the purpose of providing an end-to-end
> + /// message integrity check (MIC) of the entity-body. (Note: a MIC is good
> + /// for detecting accidental modification of the entity-body in transit, but
> + /// is not proof against malicious attacks.)
> + "#,
> + "content-md5";
> +
> + r#"
> + /// Indicates where in a full body message a partial message belongs.
> + "#,
> + "content-range";
> +
> + r#"
> + /// Allows controlling resources the user agent is allowed to load for a
> + /// given page.
> + ///
> + /// With a few exceptions, policies mostly involve specifying server origins
> + /// and script endpoints. This helps guard against cross-site scripting
> + /// attacks (XSS).
> + "#,
> + "content-security-policy";
> +
> + r#"
> + /// Allows experimenting with policies by monitoring their effects.
> + ///
> + /// The HTTP Content-Security-Policy-Report-Only response header allows web
> + /// developers to experiment with policies by monitoring (but not enforcing)
> + /// their effects. These violation reports consist of JSON documents sent
> + /// via an HTTP POST request to the specified URI.
> + "#,
> + "content-security-policy-report-only";
> +
> + r#"
> + /// Used to indicate the media type of the resource.
> + ///
> + /// In responses, a Content-Type header tells the client what the content
> + /// type of the returned content actually is. Browsers will do MIME sniffing
> + /// in some cases and will not necessarily follow the value of this header;
> + /// to prevent this behavior, the header X-Content-Type-Options can be set
> + /// to nosniff.
> + ///
> + /// In requests (such as POST or PUT), the client tells the server what
> + /// type of data is actually sent.
> + "#,
> + "content-type";
> +
> + r#"
> + /// Contains stored HTTP cookies previously sent by the server with the
> + /// Set-Cookie header.
> + ///
> + /// The Cookie header might be omitted entirely if the privacy settings of
> + /// the browser are set to block them, for example.
> + "#,
> + "cookie";
> +
> + r#"
> + /// Indicates the client's tracking preference.
> + ///
> + /// This header lets users indicate whether they would prefer privacy rather
> + /// than personalized content.
> + "#,
> + "dnt";
> +
> + r#"
> + /// Contains the date and time at which the message was originated.
> + "#,
> + "date";
> +
> + r#"
> + /// Identifier for a specific version of a resource.
> + ///
> + /// This header allows caches to be more efficient, and saves bandwidth, as
> + /// a web server does not need to send a full response if the content has
> + /// not changed. On the other hand, if the content has changed, ETags are
> + /// useful to help prevent simultaneous updates of a resource from
> + /// overwriting each other ("mid-air collisions").
> + ///
> + /// If the resource at a given URL changes, a new ETag value must be
> + /// generated. ETags are therefore similar to fingerprints and might also be
> + /// used for tracking purposes by some servers. Comparing them allows one
> + /// to quickly determine whether two representations of a resource are the
> + /// same, but they might also be set to persist indefinitely by a tracking
> + /// server.
> + "#,
> + "etag";
> +
> + r#"
> + /// Indicates expectations that need to be fulfilled by the server in order
> + /// to properly handle the request.
> + ///
> + /// The only expectation defined in the specification is Expect:
> + /// 100-continue, to which the server shall respond with:
> + ///
> + /// * 100 if the information contained in the header is sufficient to cause
> + /// an immediate success,
> + ///
> + /// * 417 (Expectation Failed) if it cannot meet the expectation; or any
> + /// other 4xx status otherwise.
> + ///
> + /// For example, the server may reject a request if its Content-Length is
> + /// too large.
> + ///
> + /// No common browsers send the Expect header, but some other clients such
> + /// as cURL do so by default.
> + "#,
> + "expect";
> +
> + r#"
> + /// Contains the date/time after which the response is considered stale.
> + ///
> + /// Invalid dates, like the value 0, represent a date in the past and mean
> + /// that the resource is already expired.
> + ///
> + /// If there is a Cache-Control header with the "max-age" or "s-maxage"
> + /// directive in the response, the Expires header is ignored.
> + "#,
> + "expires";
> +
> + r#"
> + /// Contains information from the client-facing side of proxy servers that
> + /// is altered or lost when a proxy is involved in the path of the request.
> + ///
> + /// The alternative and de-facto standard versions of this header are the
> + /// X-Forwarded-For, X-Forwarded-Host and X-Forwarded-Proto headers.
> + ///
> + /// This header is used for debugging, statistics, and generating
> + /// location-dependent content, and by design it exposes privacy-sensitive
> + /// information, such as the IP address of the client. Therefore the user's
> + /// privacy must be kept in mind when deploying this header.
> + "#,
> + "forwarded";
> +
> + r#"
> + /// Contains an Internet email address for a human user who controls the
> + /// requesting user agent.
> + ///
> + /// If you are running a robotic user agent (e.g. a crawler), the From
> + /// header should be sent, so you can be contacted if problems occur on
> + /// servers, such as if the robot is sending excessive, unwanted, or invalid
> + /// requests.
> + "#,
> + "from";
> +
> + r#"
> + /// Specifies the domain name of the server and (optionally) the TCP port
> + /// number on which the server is listening.
> + ///
> + /// If no port is given, the default port for the service requested (e.g.,
> + /// "80" for an HTTP URL) is implied.
> + ///
> + /// A Host header field must be sent in all HTTP/1.1 request messages. A 400
> + /// (Bad Request) status code will be sent to any HTTP/1.1 request message
> + /// that lacks a Host header field or contains more than one.
> + "#,
> + "host";
> +
> + r#"
> + /// Makes a request conditional based on the ETag.
> + ///
> + /// For GET and HEAD methods, the server will send back the requested
> + /// resource only if it matches one of the listed ETags. For PUT and other
> + /// non-safe methods, it will only upload the resource in this case.
> + ///
> + /// The comparison with the stored ETag uses the strong comparison
> + /// algorithm, meaning two files are considered identical byte to byte only.
> + /// This is weakened when the W/ prefix is used in front of the ETag.
> + ///
> + /// There are two common use cases:
> + ///
> + /// * For GET and HEAD methods, used in combination with a Range header, it
> + /// can guarantee that the newly requested ranges come from the same resource
> + /// as the previous one. If it doesn't match, then a 416 (Range Not
> + /// Satisfiable) response is returned.
> + ///
> + /// * For other methods, and in particular for PUT, If-Match can be used to
> + /// prevent the lost update problem. It can check if the modification of a
> + /// resource that the user wants to upload will not override another change
> + /// that has been done since the original resource was fetched. If the
> + /// request cannot be fulfilled, the 412 (Precondition Failed) response is
> + /// returned.
> + "#,
> + "if-match";
> +
> + r#"
> + /// Makes a request conditional based on the modification date.
> + ///
> + /// The If-Modified-Since request HTTP header makes the request conditional:
> + /// the server will send back the requested resource, with a 200 status,
> + /// only if it has been last modified after the given date. If the resource
> + /// has not been modified since, the response will be a 304 without any
> + /// body; the Last-Modified header will contain the date of last
> + /// modification. Unlike If-Unmodified-Since, If-Modified-Since can only be
> + /// used with a GET or HEAD.
> + ///
> + /// When used in combination with If-None-Match, it is ignored, unless the
> + /// server doesn't support If-None-Match.
> + ///
> + /// The most common use case is to update a cached entity that has no
> + /// associated ETag.
> + "#,
> + "if-modified-since";
> +
> + r#"
> + /// Makes a request conditional based on the ETag.
> + ///
> + /// The If-None-Match HTTP request header makes the request conditional. For
> + /// GET and HEAD methods, the server will send back the requested resource,
> + /// with a 200 status, only if it doesn't have an ETag matching the given
> + /// ones. For other methods, the request will be processed only if the
> + /// ETag of the possibly existing resource doesn't match any of the values
> + /// listed.
> + ///
> + /// When the condition fails for GET and HEAD methods, then the server must
> + /// return HTTP status code 304 (Not Modified). For methods that apply
> + /// server-side changes, the status code 412 (Precondition Failed) is used.
> + /// Note that the server generating a 304 response MUST generate any of the
> + /// following header fields that would have been sent in a 200 (OK) response
> + /// to the same request: Cache-Control, Content-Location, Date, ETag,
> + /// Expires, and Vary.
> + ///
> + /// The comparison with the stored ETag uses the weak comparison algorithm,
> + /// meaning two files are considered identical not only if they are
> + /// identical byte to byte, but if the content is equivalent. For example,
> + /// two pages that would differ only by the date of generation in the footer
> + /// would be considered as identical.
> + ///
> + /// When used in combination with If-Modified-Since, it has precedence (if
> + /// the server supports it).
> + ///
> + /// There are two common use cases:
> + ///
> + /// * For `GET` and `HEAD` methods, to update a cached entity that has an associated ETag.
> + /// * For other methods, and in particular for `PUT`, `If-None-Match` used with
> + /// the `*` value can be used to save a file not known to exist,
> + /// guaranteeing that another upload hasn't happened before whose data would
> + /// be lost by the new put; this problem is a variation of the lost update
> + /// problem.
> + "#,
> + "if-none-match";
> +
> + r#"
> + /// Makes a request conditional based on range.
> + ///
> + /// The If-Range HTTP request header makes a range request conditional: if
> + /// the condition is fulfilled, the range request will be issued and the
> + /// server sends back a 206 Partial Content answer with the appropriate
> + /// body. If the condition is not fulfilled, the full resource is sent back,
> + /// with a 200 OK status.
> + ///
> + /// This header can be used either with a Last-Modified validator, or with
> + /// an ETag, but not with both.
> + ///
> + /// The most common use case is to resume a download, to guarantee that the
> + /// stored resource has not been modified since the last fragment has been
> + /// received.
> + "#,
> + "if-range";
> +
> + r#"
> + /// Makes the request conditional based on the last modification date.
> + ///
> + /// The If-Unmodified-Since request HTTP header makes the request
> + /// conditional: the server will send back the requested resource, or accept
> + /// it in the case of a POST or another non-safe method, only if it has not
> + /// been last modified after the given date. If the request has been
> + /// modified after the given date, the response will be a 412 (Precondition
> + /// Failed) error.
> + ///
> + /// There are two common use cases:
> + ///
> + /// * In conjunction with non-safe methods, like POST, it can be used to
> + /// implement optimistic concurrency control, as done by some wikis: edits
> + /// are rejected if the stored document has been modified since the original
> + /// was retrieved.
> + ///
> + /// * In conjunction with a range request using an If-Range header, it can be
> + /// used to ensure that the new fragment requested comes from an unmodified
> + /// document.
> + "#,
> + "if-unmodified-since";
> +
> + r#"
> + /// The date and time at which the origin server believes the resource was
> + /// last modified.
> + "#,
> + "last-modified";
> +
> + r#"
> + /// Hints about how the connection may be used to set a timeout and a
> + /// maximum number of requests.
> + "#,
> + "keep-alive";
> +
> + r#"
> + /// Allows the server to point an interested client to another resource
> + /// containing metadata about the requested resource.
> + "#,
> + "link";
> +
> + r#"
> + /// Indicates the URL to redirect a page to.
> + ///
> + /// The Location response header indicates the URL to redirect a page to. It
> + /// only provides a meaning when served with a 3xx status response.
> + ///
> + /// The HTTP method used to make the new request to fetch the page pointed
> + /// to by Location depends on the original method and on the kind of
> + /// redirection:
> + ///
> + /// * While 303 (See Other) responses always lead to the use of a GET method,
> + /// 307 (Temporary Redirect) and 308 (Permanent Redirect) don't change the
> + /// method used in the original request;
> + ///
> + /// * 301 (Moved Permanently) and 302 (Found) don't change the method most
> + /// of the time, though older user-agents may (so you basically don't
> + /// know).
> + ///
> + /// All responses with one of these status codes send a Location header.
> + ///
> + /// Besides redirect responses, messages with 201 (Created) status also
> + /// include the Location header. It indicates the URL to the newly created
> + /// resource.
> + ///
> + /// Location and Content-Location are different: Location indicates the
> + /// target of a redirection (or the URL of a newly created resource), while
> + /// Content-Location indicates the direct URL to use to access the resource
> + /// when content negotiation happened, without the need of further content
> + /// negotiation. Location is a header associated with the response, while
> + /// Content-Location is associated with the entity returned.
> + "#,
> + "location";
> +
> + r#"
> + /// Indicates the max number of intermediaries the request should be sent
> + /// through.
> + "#,
> + "max-forwards";
> +
> + r#"
> + /// Indicates where a fetch originates from.
> + ///
> + /// It doesn't include any path information, but only the server name. It is
> + /// sent with CORS requests, as well as with POST requests. It is similar to
> + /// the Referer header, but, unlike this header, it doesn't disclose the
> + /// whole path.
> + "#,
> + "origin";
> +
> + r#"
> + /// HTTP/1.0 header usually used for backwards compatibility.
> + ///
> + /// The Pragma HTTP/1.0 general header is an implementation-specific header
> + /// that may have various effects along the request-response chain. It is
> + /// used for backwards compatibility with HTTP/1.0 caches where the
> + /// Cache-Control HTTP/1.1 header is not yet present.
> + "#,
> + "pragma";
> +
> + r#"
> + /// Defines the authentication method that should be used to gain access to
> + /// a proxy.
> + ///
> + /// Unlike `www-authenticate`, the `proxy-authenticate` header field applies
> + /// only to the next outbound client on the response chain. This is because
> + /// only the client that chose a given proxy is likely to have the
> + /// credentials necessary for authentication. However, when multiple proxies
> + /// are used within the same administrative domain, such as office and
> + /// regional caching proxies within a large corporate network, it is common
> + /// for credentials to be generated by the user agent and passed through the
> + /// hierarchy until consumed. Hence, in such a configuration, it will appear
> + /// as if Proxy-Authenticate is being forwarded because each proxy will send
> + /// the same challenge set.
> + ///
> + /// The `proxy-authenticate` header is sent along with a `407 Proxy
> + /// Authentication Required`.
> + "#,
> + "proxy-authenticate";
> +
> + r#"
> + /// Contains the credentials to authenticate a user agent to a proxy server.
> + ///
> + /// This header is usually included after the server has responded with a
> + /// 407 Proxy Authentication Required status and the Proxy-Authenticate
> + /// header.
> + "#,
> + "proxy-authorization";
> +
> + r#"
> + /// Associates a specific cryptographic public key with a certain server.
> + ///
> + /// This decreases the risk of MITM attacks with forged certificates. If one
> + /// or several keys are pinned and none of them are used by the server, the
> + /// browser will not accept the response as legitimate, and will not display
> + /// it.
> + "#,
> + "public-key-pins";
> +
> + r#"
> + /// Sends reports of pinning violation to the report-uri specified in the
> + /// header.
> + ///
> + /// Unlike `Public-Key-Pins`, this header still allows browsers to connect
> + /// to the server if the pinning is violated.
> + "#,
> + "public-key-pins-report-only";
> +
> + r#"
> + /// Indicates the part of a document that the server should return.
> + ///
> + /// Several parts can be requested with one Range header at once, and the
> + /// server may send back these ranges in a multipart document. If the server
> + /// sends back ranges, it uses the 206 Partial Content for the response. If
> + /// the ranges are invalid, the server returns the 416 Range Not Satisfiable
> + /// error. The server can also ignore the Range header and return the whole
> + /// document with a 200 status code.
> + "#,
> + "range";
> +
> + r#"
> + /// Contains the address of the previous web page from which a link to the
> + /// currently requested page was followed.
> + ///
> + /// The Referer header allows servers to identify where people are visiting
> + /// them from and may use that data for analytics, logging, or optimized
> + /// caching, for example.
> + "#,
> + "referer";
> +
> + r#"
> + /// Governs which referrer information should be included with requests
> + /// made.
> + "#,
> + "referrer-policy";
> +
> + r#"
> + /// Informs the web browser that the current page or frame should be
> + /// refreshed.
> + "#,
> + "refresh";
> +
> + r#"
> + /// The Retry-After response HTTP header indicates how long the user agent
> + /// should wait before making a follow-up request. There are two main cases
> + /// in which this header is used:
> + ///
> + /// * When sent with a 503 (Service Unavailable) response, it indicates how
> + /// long the service is expected to be unavailable.
> + ///
> + /// * When sent with a redirect response, such as 301 (Moved Permanently),
> + /// it indicates the minimum time that the user agent is asked to wait
> + /// before issuing the redirected request.
> + "#,
> + "retry-after";
> +
> + r#"
> + /// Contains information about the software used by the origin server to
> + /// handle the request.
> + ///
> + /// Overly long and detailed Server values should be avoided as they
> + /// potentially reveal internal implementation details that might make it
> + /// (slightly) easier for attackers to find and exploit known security
> + /// holes.
> + "#,
> + "server";
> +
> + r#"
> + /// Used to send cookies from the server to the user agent.
> + "#,
> + "set-cookie";
> +
> + r#"
> + /// Tells the client to communicate with HTTPS instead of using HTTP.
> + "#,
> + "strict-transport-security";
> +
> + r#"
> + /// Informs the server of transfer encodings willing to be accepted as part
> + /// of the response.
> + ///
> + /// See also the Transfer-Encoding response header for more details on
> + /// transfer encodings. Note that chunked is always acceptable for HTTP/1.1
> + /// recipients and that you don't have to specify "chunked" using the TE
> + /// header. However, it is useful for indicating whether the client accepts
> + /// trailer fields in a chunked transfer coding, using the "trailers" value.
> + "#,
> + "te";
> +
> + r#"
> + /// Indicates the tracking status that applied to the corresponding request.
> + "#,
> + "tk";
> +
> + r#"
> + /// Allows the sender to include additional fields at the end of chunked
> + /// messages.
> + "#,
> + "trailer";
> +
> + r#"
> + /// Specifies the form of encoding used to safely transfer the entity to the
> + /// client.
> + ///
> + /// `transfer-encoding` is a hop-by-hop header, that is, it applies to a
> + /// message between two nodes, not to a resource itself. Each segment of a
> + /// multi-node connection can use different `transfer-encoding` values. If
> + /// you want to compress data over the whole connection, use the end-to-end
> + /// `content-encoding` header instead.
> + ///
> + /// When present on a response to a `HEAD` request that has no body, it
> + /// indicates the value that would have applied to the corresponding `GET`
> + /// message.
> + "#,
> + "transfer-encoding";
> +
> + r#"
> + /// A response to the client's tracking preference.
> + ///
> + /// A tracking status value (TSV) is a single character response to the
> + /// user's tracking preference with regard to data collected via the
> + /// designated resource. For a site-wide tracking status resource, the
> + /// designated resource is any resource on the same origin server. For a Tk
> + /// response header field, the target resource of the corresponding request
> + /// is the designated resource, and remains so for any subsequent
> + /// request-specific tracking status resource referred to by the Tk field
> + /// value.
> + "#,
> + "tsv";
> +
> + r#"
> + /// Contains a string that allows identifying the requesting client's
> + /// software.
> + "#,
> + "user-agent";
> +
> + r#"
> + /// Used as part of the exchange to upgrade the protocol.
> + "#,
> + "upgrade";
> +
> + r#"
> + /// Sends a signal to the server expressing the client’s preference for an
> + /// encrypted and authenticated response.
> + "#,
> + "upgrade-insecure-requests";
> +
> + r#"
> + /// Determines how to match future requests with cached responses.
> + ///
> + /// The `vary` HTTP response header determines how to match future request
> + /// headers to decide whether a cached response can be used rather than
> + /// requesting a fresh one from the origin server. It is used by the server
> + /// to indicate which headers it used when selecting a representation of a
> + /// resource in a content negotiation algorithm.
> + ///
> + /// The `vary` header should be set on a 304 Not Modified response exactly
> + /// like it would have been set on an equivalent 200 OK response.
> + "#,
> + "vary";
> +
> + r#"
> + /// Added by proxies to track routing.
> + ///
> + /// The `via` general header is added by proxies, both forward and reverse
> + /// proxies, and can appear in the request headers and the response headers.
> + /// It is used for tracking message forwards, avoiding request loops, and
> + /// identifying the protocol capabilities of senders along the
> + /// request/response chain.
> + "#,
> + "via";
> +
> + r#"
> + /// General HTTP header that contains information about possible problems with
> + /// the status of the message.
> + ///
> + /// More than one `warning` header may appear in a response. Warning header
> + /// fields can in general be applied to any message; however, some warn-codes
> + /// are specific to caches and can only be applied to response messages.
> + "#,
> + "warning";
> +
> + r#"
> + /// Defines the authentication method that should be used to gain access to
> + /// a resource.
> + "#,
> + "www-authenticate";
> +
> + r#"
> + /// Marker used by the server to indicate that the MIME types advertised in
> + /// the `content-type` headers should not be changed and should be followed.
> + ///
> + /// This allows opting out of MIME type sniffing, or, in other words, it is
> + /// a way to say that the webmasters knew what they were doing.
> + ///
> + /// This header was introduced by Microsoft in IE 8 as a way for webmasters
> + /// to block content sniffing that was happening and could transform
> + /// non-executable MIME types into executable MIME types. Since then, other
> + /// browsers have introduced it, even if their MIME sniffing algorithms were
> + /// less aggressive.
> + ///
> + /// Site security testers usually expect this header to be set.
> + "#,
> + "x-content-type-options";
> +
> + r#"
> + /// Controls DNS prefetching.
> + ///
> + /// The `x-dns-prefetch-control` HTTP response header controls DNS
> + /// prefetching, a feature by which browsers proactively perform domain name
> + /// resolution on both links that the user may choose to follow as well as
> + /// URLs for items referenced by the document, including images, CSS,
> + /// JavaScript, and so forth.
> + ///
> + /// This prefetching is performed in the background, so that the DNS is
> + /// likely to have been resolved by the time the referenced items are
> + /// needed. This reduces latency when the user clicks a link.
> + "#,
> + "x-dns-prefetch-control";
> +
> + r#"
> + /// Indicates whether or not a browser should be allowed to render a page in
> + /// a frame.
> + ///
> + /// Sites can use this to avoid clickjacking attacks, by ensuring that their
> + /// content is not embedded into other sites.
> + ///
> + /// The added security is only provided if the user accessing the document
> + /// is using a browser supporting `x-frame-options`.
> + "#,
> + "x-frame-options";
> +
> + r#"
> + /// Stops pages from loading when an XSS attack is detected.
> + ///
> + /// The HTTP X-XSS-Protection response header is a feature of Internet
> + /// Explorer, Chrome and Safari that stops pages from loading when they
> + /// detect reflected cross-site scripting (XSS) attacks. Although these
> + /// protections are largely unnecessary in modern browsers when sites
> + /// implement a strong Content-Security-Policy that disables the use of
> + /// inline JavaScript ('unsafe-inline'), they can still provide protections
> + /// for users of older web browsers that don't yet support CSP.
> + "#,
> + "x-xss-protection";
> +}
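> +
> +// The conditional-request headers documented above are typically set on a
> +// request rather than read in isolation. A minimal sketch, assuming the
> +// `Request` builder API exposed by this crate (the URI and ETag value below
> +// are illustrative only):
> +//
> +//     // Revalidate a cached copy: ask for the body only if the ETag changed.
> +//     let req = http::Request::builder()
> +//         .method("GET")
> +//         .uri("https://example.com/resource")
> +//         .header("if-none-match", "\"xyzzy\"")
> +//         .body(())
> +//         .unwrap();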
> +
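> +// Turns a dashed header name into a type-style identifier: the dashes are
> +// dropped and the first letter of each part is upper-cased, so "content-type"
> +// becomes "ContentType".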
> +fn constantize(s: &str) -> String {
> + let parts = s.split("-").map(|s| {
> + s.chars().enumerate().map(|(n, c)| {
> + if n == 0 {
> + c.to_uppercase().to_string()
> + } else {
> + c.to_string()
> + }
> + })
> + });
> +
> + let mut res = String::new();
> +
> + for part in parts {
> + res.extend(part);
> + }
> +
> + res
> +}
> +
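> +// Turns a header name into a constant-style identifier: dashes become
> +// underscores and every character is upper-cased, so "content-type" becomes
> +// "CONTENT_TYPE".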
> +fn upcase(s: &str) -> String {
> + let mut ret = String::new();
> +
> + for ch in s.chars() {
> + if ch == '-' {
> + ret.push('_');
> + } else {
> + for ch in ch.to_uppercase() {
> + ret.push(ch);
> + }
> + }
> + }
> +
> + ret
> +}
> +
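> +// For every entry in HEADERS, prints its doc comment (with the raw-string
> +// framing trimmed off) followed by a line of the form
> +//
> +//     (ContentType, CONTENT_TYPE, "content-type");
> +//
> +// which presumably is what the header-constant definitions are generated from.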
> +pub fn main() {
> + for &(doc, string) in HEADERS.iter() {
> + println!("{}", &doc[1..doc.len()-5]);
> + println!(" ({}, {}, {:?});", constantize(string), upcase(string), string);
> + println!("");
> + }
> +}
>
>
> _______________________________________________
> Pkg-rust-maintainers mailing list
> Pkg-rust-maintainers at alioth-lists.debian.net
> https://alioth-lists.debian.net/cgi-bin/mailman/listinfo/pkg-rust-maintainers