Diffstat (limited to 'vendor/golang.org/x/net/http2')
28 files changed, 2387 insertions, 1265 deletions
diff --git a/vendor/golang.org/x/net/http2/ciphers.go b/vendor/golang.org/x/net/http2/ciphers.go new file mode 100644 index 000000000..c9a0cf3b4 --- /dev/null +++ b/vendor/golang.org/x/net/http2/ciphers.go @@ -0,0 +1,641 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +// A list of the possible cipher suite ids. Taken from +// https://www.iana.org/assignments/tls-parameters/tls-parameters.txt + +const ( + cipher_TLS_NULL_WITH_NULL_NULL uint16 = 0x0000 + cipher_TLS_RSA_WITH_NULL_MD5 uint16 = 0x0001 + cipher_TLS_RSA_WITH_NULL_SHA uint16 = 0x0002 + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0003 + cipher_TLS_RSA_WITH_RC4_128_MD5 uint16 = 0x0004 + cipher_TLS_RSA_WITH_RC4_128_SHA uint16 = 0x0005 + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x0006 + cipher_TLS_RSA_WITH_IDEA_CBC_SHA uint16 = 0x0007 + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0008 + cipher_TLS_RSA_WITH_DES_CBC_SHA uint16 = 0x0009 + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x000A + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000B + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA uint16 = 0x000C + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x000D + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x000E + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA uint16 = 0x000F + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0010 + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0011 + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA uint16 = 0x0012 + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0x0013 + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0014 + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA uint16 = 0x0015 + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0x0016 + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5 uint16 = 0x0017 + cipher_TLS_DH_anon_WITH_RC4_128_MD5 uint16 = 0x0018 + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA uint16 = 0x0019 + cipher_TLS_DH_anon_WITH_DES_CBC_SHA uint16 = 0x001A + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0x001B + // Reserved uint16 = 0x001C-1D + cipher_TLS_KRB5_WITH_DES_CBC_SHA uint16 = 0x001E + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA uint16 = 0x001F + cipher_TLS_KRB5_WITH_RC4_128_SHA uint16 = 0x0020 + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA uint16 = 0x0021 + cipher_TLS_KRB5_WITH_DES_CBC_MD5 uint16 = 0x0022 + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5 uint16 = 0x0023 + cipher_TLS_KRB5_WITH_RC4_128_MD5 uint16 = 0x0024 + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5 uint16 = 0x0025 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA uint16 = 0x0026 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA uint16 = 0x0027 + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA uint16 = 0x0028 + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5 uint16 = 0x0029 + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5 uint16 = 0x002A + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5 uint16 = 0x002B + cipher_TLS_PSK_WITH_NULL_SHA uint16 = 0x002C + cipher_TLS_DHE_PSK_WITH_NULL_SHA uint16 = 0x002D + cipher_TLS_RSA_PSK_WITH_NULL_SHA uint16 = 0x002E + cipher_TLS_RSA_WITH_AES_128_CBC_SHA uint16 = 0x002F + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0030 + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0031 + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA uint16 = 0x0032 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0x0033 + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA uint16 = 0x0034 + cipher_TLS_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0035 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0036 + 
cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0037 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA uint16 = 0x0038 + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0x0039 + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA uint16 = 0x003A + cipher_TLS_RSA_WITH_NULL_SHA256 uint16 = 0x003B + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003C + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x003D + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x003E + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x003F + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256 uint16 = 0x0040 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0041 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0042 + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0043 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0044 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0045 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA uint16 = 0x0046 + // Reserved uint16 = 0x0047-4F + // Reserved uint16 = 0x0050-58 + // Reserved uint16 = 0x0059-5C + // Unassigned uint16 = 0x005D-5F + // Reserved uint16 = 0x0060-66 + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0x0067 + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x0068 + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x0069 + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256 uint16 = 0x006A + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256 uint16 = 0x006B + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256 uint16 = 0x006C + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256 uint16 = 0x006D + // Unassigned uint16 = 0x006E-83 + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0084 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0085 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0086 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0087 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0088 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA uint16 = 0x0089 + cipher_TLS_PSK_WITH_RC4_128_SHA uint16 = 0x008A + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008B + cipher_TLS_PSK_WITH_AES_128_CBC_SHA uint16 = 0x008C + cipher_TLS_PSK_WITH_AES_256_CBC_SHA uint16 = 0x008D + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA uint16 = 0x008E + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x008F + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0090 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0091 + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA uint16 = 0x0092 + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0x0093 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA uint16 = 0x0094 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA uint16 = 0x0095 + cipher_TLS_RSA_WITH_SEED_CBC_SHA uint16 = 0x0096 + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA uint16 = 0x0097 + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA uint16 = 0x0098 + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA uint16 = 0x0099 + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA uint16 = 0x009A + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA uint16 = 0x009B + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009C + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009D + cipher_TLS_DHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x009E + cipher_TLS_DHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x009F + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0x00A0 + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0x00A1 + cipher_TLS_DHE_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A2 + cipher_TLS_DHE_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A3 + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256 uint16 = 0x00A4 + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384 uint16 = 0x00A5 + 
cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256 uint16 = 0x00A6 + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384 uint16 = 0x00A7 + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00A8 + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00A9 + cipher_TLS_DHE_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AA + cipher_TLS_DHE_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AB + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256 uint16 = 0x00AC + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384 uint16 = 0x00AD + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00AE + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00AF + cipher_TLS_PSK_WITH_NULL_SHA256 uint16 = 0x00B0 + cipher_TLS_PSK_WITH_NULL_SHA384 uint16 = 0x00B1 + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B2 + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B3 + cipher_TLS_DHE_PSK_WITH_NULL_SHA256 uint16 = 0x00B4 + cipher_TLS_DHE_PSK_WITH_NULL_SHA384 uint16 = 0x00B5 + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0x00B6 + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0x00B7 + cipher_TLS_RSA_PSK_WITH_NULL_SHA256 uint16 = 0x00B8 + cipher_TLS_RSA_PSK_WITH_NULL_SHA384 uint16 = 0x00B9 + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BA + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BB + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BC + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BD + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BE + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0x00BF + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C0 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C1 + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C2 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C3 + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C4 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256 uint16 = 0x00C5 + // Unassigned uint16 = 0x00C6-FE + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV uint16 = 0x00FF + // Unassigned uint16 = 0x01-55,* + cipher_TLS_FALLBACK_SCSV uint16 = 0x5600 + // Unassigned uint16 = 0x5601 - 0xC000 + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA uint16 = 0xC001 + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA uint16 = 0xC002 + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC003 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC004 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC005 + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA uint16 = 0xC006 + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA uint16 = 0xC007 + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC008 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA uint16 = 0xC009 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA uint16 = 0xC00A + cipher_TLS_ECDH_RSA_WITH_NULL_SHA uint16 = 0xC00B + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA uint16 = 0xC00C + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC00D + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC00E + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC00F + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA uint16 = 0xC010 + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA uint16 = 0xC011 + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC012 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC013 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC014 + cipher_TLS_ECDH_anon_WITH_NULL_SHA uint16 = 0xC015 + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA uint16 = 0xC016 + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA uint16 = 0xC017 + cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA uint16 = 0xC018 + 
cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA uint16 = 0xC019 + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01A + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01B + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA uint16 = 0xC01C + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA uint16 = 0xC01D + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA uint16 = 0xC01E + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA uint16 = 0xC01F + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA uint16 = 0xC020 + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA uint16 = 0xC021 + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA uint16 = 0xC022 + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC023 + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC024 + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC025 + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC026 + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC027 + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC028 + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256 uint16 = 0xC029 + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384 uint16 = 0xC02A + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02B + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02C + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02D + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC02E + cipher_TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC02F + cipher_TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC030 + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256 uint16 = 0xC031 + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384 uint16 = 0xC032 + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA uint16 = 0xC033 + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA uint16 = 0xC034 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA uint16 = 0xC035 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA uint16 = 0xC036 + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256 uint16 = 0xC037 + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384 uint16 = 0xC038 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA uint16 = 0xC039 + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256 uint16 = 0xC03A + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384 uint16 = 0xC03B + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03C + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03D + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC03E + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC03F + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC040 + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC041 + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC042 + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC043 + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC044 + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC045 + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC046 + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC047 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC048 + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC049 + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04A + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04B + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04C + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04D + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC04E + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC04F + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC050 + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC051 + 
cipher_TLS_DHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC052 + cipher_TLS_DHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC053 + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC054 + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC055 + cipher_TLS_DHE_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC056 + cipher_TLS_DHE_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC057 + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC058 + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC059 + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05A + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05B + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05C + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05D + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC05E + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC05F + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC060 + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC061 + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC062 + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC063 + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC064 + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC065 + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC066 + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC067 + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC068 + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC069 + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06A + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06B + cipher_TLS_DHE_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06C + cipher_TLS_DHE_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06D + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256 uint16 = 0xC06E + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384 uint16 = 0xC06F + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256 uint16 = 0xC070 + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384 uint16 = 0xC071 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC072 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC073 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC074 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC075 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC076 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC077 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC078 + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC079 + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07A + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07B + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07C + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07D + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC07E + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC07F + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC080 + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC081 + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC082 + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC083 + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC084 + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC085 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC086 + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC087 + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC088 + 
cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC089 + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08A + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08B + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08C + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08D + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC08E + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC08F + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC090 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC091 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256 uint16 = 0xC092 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384 uint16 = 0xC093 + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC094 + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC095 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC096 + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC097 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC098 + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC099 + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256 uint16 = 0xC09A + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384 uint16 = 0xC09B + cipher_TLS_RSA_WITH_AES_128_CCM uint16 = 0xC09C + cipher_TLS_RSA_WITH_AES_256_CCM uint16 = 0xC09D + cipher_TLS_DHE_RSA_WITH_AES_128_CCM uint16 = 0xC09E + cipher_TLS_DHE_RSA_WITH_AES_256_CCM uint16 = 0xC09F + cipher_TLS_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A0 + cipher_TLS_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A1 + cipher_TLS_DHE_RSA_WITH_AES_128_CCM_8 uint16 = 0xC0A2 + cipher_TLS_DHE_RSA_WITH_AES_256_CCM_8 uint16 = 0xC0A3 + cipher_TLS_PSK_WITH_AES_128_CCM uint16 = 0xC0A4 + cipher_TLS_PSK_WITH_AES_256_CCM uint16 = 0xC0A5 + cipher_TLS_DHE_PSK_WITH_AES_128_CCM uint16 = 0xC0A6 + cipher_TLS_DHE_PSK_WITH_AES_256_CCM uint16 = 0xC0A7 + cipher_TLS_PSK_WITH_AES_128_CCM_8 uint16 = 0xC0A8 + cipher_TLS_PSK_WITH_AES_256_CCM_8 uint16 = 0xC0A9 + cipher_TLS_PSK_DHE_WITH_AES_128_CCM_8 uint16 = 0xC0AA + cipher_TLS_PSK_DHE_WITH_AES_256_CCM_8 uint16 = 0xC0AB + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM uint16 = 0xC0AC + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM uint16 = 0xC0AD + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CCM_8 uint16 = 0xC0AE + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CCM_8 uint16 = 0xC0AF + // Unassigned uint16 = 0xC0B0-FF + // Unassigned uint16 = 0xC1-CB,* + // Unassigned uint16 = 0xCC00-A7 + cipher_TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA8 + cipher_TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCA9 + cipher_TLS_DHE_RSA_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAA + cipher_TLS_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAB + cipher_TLS_ECDHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAC + cipher_TLS_DHE_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAD + cipher_TLS_RSA_PSK_WITH_CHACHA20_POLY1305_SHA256 uint16 = 0xCCAE +) + +// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. +// References: +// https://tools.ietf.org/html/rfc7540#appendix-A +// Reject cipher suites from Appendix A. 
+// "This list includes those cipher suites that do not +// offer an ephemeral key exchange and those that are +// based on the TLS null, stream or block cipher type" +func isBadCipher(cipher uint16) bool { + switch cipher { + case cipher_TLS_NULL_WITH_NULL_NULL, + cipher_TLS_RSA_WITH_NULL_MD5, + cipher_TLS_RSA_WITH_NULL_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_RSA_WITH_RC4_128_MD5, + cipher_TLS_RSA_WITH_RC4_128_SHA, + cipher_TLS_RSA_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_RSA_WITH_IDEA_CBC_SHA, + cipher_TLS_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_RSA_WITH_DES_CBC_SHA, + cipher_TLS_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DH_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_DSS_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_DES_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_RSA_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_DES_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DH_anon_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_DH_anon_WITH_RC4_128_MD5, + cipher_TLS_DH_anon_EXPORT_WITH_DES40_CBC_SHA, + cipher_TLS_DH_anon_WITH_DES_CBC_SHA, + cipher_TLS_DH_anon_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_SHA, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_KRB5_WITH_RC4_128_SHA, + cipher_TLS_KRB5_WITH_IDEA_CBC_SHA, + cipher_TLS_KRB5_WITH_DES_CBC_MD5, + cipher_TLS_KRB5_WITH_3DES_EDE_CBC_MD5, + cipher_TLS_KRB5_WITH_RC4_128_MD5, + cipher_TLS_KRB5_WITH_IDEA_CBC_MD5, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_SHA, + cipher_TLS_KRB5_EXPORT_WITH_DES_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC2_CBC_40_MD5, + cipher_TLS_KRB5_EXPORT_WITH_RC4_40_MD5, + cipher_TLS_PSK_WITH_NULL_SHA, + cipher_TLS_DHE_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_PSK_WITH_NULL_SHA, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_NULL_SHA256, + cipher_TLS_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_AES_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_AES_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA, + 
cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA, + cipher_TLS_PSK_WITH_RC4_128_SHA, + cipher_TLS_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_DHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_RC4_128_SHA, + cipher_TLS_RSA_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DH_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_DSS_WITH_SEED_CBC_SHA, + cipher_TLS_DHE_RSA_WITH_SEED_CBC_SHA, + cipher_TLS_DH_anon_WITH_SEED_CBC_SHA, + cipher_TLS_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_AES_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_AES_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_GCM_SHA384, + cipher_TLS_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_PSK_WITH_NULL_SHA256, + cipher_TLS_PSK_WITH_NULL_SHA384, + cipher_TLS_DHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_NULL_SHA256, + cipher_TLS_DHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_NULL_SHA256, + cipher_TLS_RSA_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_CBC_SHA256, + cipher_TLS_EMPTY_RENEGOTIATION_INFO_SCSV, + cipher_TLS_ECDH_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDH_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_NULL_SHA, + cipher_TLS_ECDH_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDH_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_NULL_SHA, + cipher_TLS_ECDHE_RSA_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_NULL_SHA, + cipher_TLS_ECDH_anon_WITH_RC4_128_SHA, + cipher_TLS_ECDH_anon_WITH_3DES_EDE_CBC_SHA, + 
cipher_TLS_ECDH_anon_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDH_anon_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_128_CBC_SHA, + cipher_TLS_SRP_SHA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_RSA_WITH_AES_256_CBC_SHA, + cipher_TLS_SRP_SHA_DSS_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_AES_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_AES_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_RC4_128_SHA, + cipher_TLS_ECDHE_PSK_WITH_3DES_EDE_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA, + cipher_TLS_ECDHE_PSK_WITH_AES_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_AES_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA256, + cipher_TLS_ECDHE_PSK_WITH_NULL_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_DSS_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_DSS_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_CBC_SHA384, + cipher_TLS_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_ARIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_ARIA_256_GCM_SHA384, + cipher_TLS_ECDHE_PSK_WITH_ARIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_ARIA_256_CBC_SHA384, + 
cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_DSS_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_DSS_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_DH_anon_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_DH_anon_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_ECDSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_ECDH_RSA_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_GCM_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_GCM_SHA384, + cipher_TLS_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_DHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_RSA_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_128_CBC_SHA256, + cipher_TLS_ECDHE_PSK_WITH_CAMELLIA_256_CBC_SHA384, + cipher_TLS_RSA_WITH_AES_128_CCM, + cipher_TLS_RSA_WITH_AES_256_CCM, + cipher_TLS_RSA_WITH_AES_128_CCM_8, + cipher_TLS_RSA_WITH_AES_256_CCM_8, + cipher_TLS_PSK_WITH_AES_128_CCM, + cipher_TLS_PSK_WITH_AES_256_CCM, + cipher_TLS_PSK_WITH_AES_128_CCM_8, + cipher_TLS_PSK_WITH_AES_256_CCM_8: + return true + default: + return false + } +} diff --git a/vendor/golang.org/x/net/http2/client_conn_pool.go b/vendor/golang.org/x/net/http2/client_conn_pool.go index b13941258..f4d9b5ece 100644 --- a/vendor/golang.org/x/net/http2/client_conn_pool.go +++ b/vendor/golang.org/x/net/http2/client_conn_pool.go @@ -52,9 +52,31 @@ const ( noDialOnMiss = false ) +// shouldTraceGetConn reports whether getClientConn should call any +// ClientTrace.GetConn hook associated with the http.Request. +// +// This complexity is needed to avoid double calls of the GetConn hook +// during the back-and-forth between net/http and x/net/http2 (when the +// net/http.Transport is upgraded to also speak http2), as well as support +// the case where x/net/http2 is being used directly. +func (p *clientConnPool) shouldTraceGetConn(st clientConnIdleState) bool { + // If our Transport wasn't made via ConfigureTransport, always + // trace the GetConn hook if provided, because that means the + // http2 package is being used directly and it's the one + // dialing, as opposed to net/http. + if _, ok := p.t.ConnPool.(noDialClientConnPool); !ok { + return true + } + // Otherwise, only use the GetConn hook if this connection has + // been used previously for other requests. For fresh + // connections, the net/http package does the dialing. + return !st.freshConn +} + func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMiss bool) (*ClientConn, error) { if isConnectionCloseRequest(req) && dialOnMiss { // It gets its own connection. 
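
As an illustrative aside (not part of the vendored diff): the isBadCipher blacklist added in ciphers.go above uses unexported cipher_* identifiers, so a caller outside the package would express the same check with the exported crypto/tls constants. The sketch below is a minimal, hedged version of that idea; the package name and the small subset of Appendix A suites shown are assumptions for illustration, not the full list.

    package ciphercheck // hypothetical package name, illustration only

    import (
    	"crypto/tls"
    	"fmt"
    )

    // forbidden is a small, illustrative subset of the RFC 7540 Appendix A
    // cipher suites, expressed with exported crypto/tls constants because the
    // cipher_* identifiers in ciphers.go are unexported.
    var forbidden = map[uint16]bool{
    	tls.TLS_RSA_WITH_AES_128_CBC_SHA:       true,
    	tls.TLS_RSA_WITH_AES_256_CBC_SHA:       true,
    	tls.TLS_RSA_WITH_AES_128_GCM_SHA256:    true,
    	tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA: true,
    }

    // checkCipher rejects a finished TLS handshake whose negotiated suite is
    // on the forbidden list, in the spirit of the server-side isBadCipher check.
    func checkCipher(cs tls.ConnectionState) error {
    	if forbidden[cs.CipherSuite] {
    		return fmt.Errorf("http2: negotiated forbidden cipher suite %#04x", cs.CipherSuite)
    	}
    	return nil
    }
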
+ traceGetConn(req, addr) const singleUse = true cc, err := p.t.dialClientConn(addr, singleUse) if err != nil { @@ -64,7 +86,10 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis } p.mu.Lock() for _, cc := range p.conns[addr] { - if cc.CanTakeNewRequest() { + if st := cc.idleState(); st.canTakeNewRequest { + if p.shouldTraceGetConn(st) { + traceGetConn(req, addr) + } p.mu.Unlock() return cc, nil } @@ -73,6 +98,7 @@ func (p *clientConnPool) getClientConn(req *http.Request, addr string, dialOnMis p.mu.Unlock() return nil, ErrNoCachedConn } + traceGetConn(req, addr) call := p.getStartDialLocked(addr) p.mu.Unlock() <-call.done @@ -247,7 +273,7 @@ func filterOutClientConn(in []*ClientConn, exclude *ClientConn) []*ClientConn { } // noDialClientConnPool is an implementation of http2.ClientConnPool -// which never dials. We let the HTTP/1.1 client dial and use its TLS +// which never dials. We let the HTTP/1.1 client dial and use its TLS // connection instead. type noDialClientConnPool struct{ *clientConnPool } diff --git a/vendor/golang.org/x/net/http2/configure_transport.go b/vendor/golang.org/x/net/http2/configure_transport.go deleted file mode 100644 index 4f720f530..000000000 --- a/vendor/golang.org/x/net/http2/configure_transport.go +++ /dev/null @@ -1,80 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "fmt" - "net/http" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - connPool := new(clientConnPool) - t2 := &Transport{ - ConnPool: noDialClientConnPool{connPool}, - t1: t1, - } - connPool.t = t2 - if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { - return nil, err - } - if t1.TLSClientConfig == nil { - t1.TLSClientConfig = new(tls.Config) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { - t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) - } - if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { - t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") - } - upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { - addr := authorityAddr("https", authority) - if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { - go c.Close() - return erringRoundTripper{err} - } else if !used { - // Turns out we don't need this c. - // For example, two goroutines made requests to the same host - // at the same time, both kicking off TCP dials. (since protocol - // was unknown) - go c.Close() - } - return t2 - } - if m := t1.TLSNextProto; len(m) == 0 { - t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ - "h2": upgradeFn, - } - } else { - m["h2"] = upgradeFn - } - return t2, nil -} - -// registerHTTPSProtocol calls Transport.RegisterProtocol but -// convering panics into errors. -func registerHTTPSProtocol(t *http.Transport, rt http.RoundTripper) (err error) { - defer func() { - if e := recover(); e != nil { - err = fmt.Errorf("%v", e) - } - }() - t.RegisterProtocol("https", rt) - return nil -} - -// noDialH2RoundTripper is a RoundTripper which only tries to complete the request -// if there's already has a cached connection to the host. 
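
As an illustrative aside (not part of the vendored diff): the GetConn/GotConn hooks that shouldTraceGetConn and traceGetConn guard above are the standard net/http/httptrace callbacks. A minimal sketch of how a caller would attach them to a request follows; the package and function names are assumptions for illustration.

    package tracedemo // hypothetical package name, illustration only

    import (
    	"log"
    	"net/http"
    	"net/http/httptrace"
    )

    // withConnTrace attaches GetConn/GotConn hooks to a request; these are the
    // hooks the connection pool decides whether to fire via shouldTraceGetConn.
    func withConnTrace(req *http.Request) *http.Request {
    	trace := &httptrace.ClientTrace{
    		GetConn: func(hostPort string) { log.Printf("dialing or reusing a conn for %s", hostPort) },
    		GotConn: func(info httptrace.GotConnInfo) { log.Printf("got conn, reused=%v", info.Reused) },
    	}
    	return req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
    }
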
-type noDialH2RoundTripper struct{ t *Transport } - -func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - res, err := rt.t.RoundTrip(req) - if err == ErrNoCachedConn { - return nil, http.ErrSkipAltProtocol - } - return res, err -} diff --git a/vendor/golang.org/x/net/http2/databuffer.go b/vendor/golang.org/x/net/http2/databuffer.go new file mode 100644 index 000000000..a3067f8de --- /dev/null +++ b/vendor/golang.org/x/net/http2/databuffer.go @@ -0,0 +1,146 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package http2 + +import ( + "errors" + "fmt" + "sync" +) + +// Buffer chunks are allocated from a pool to reduce pressure on GC. +// The maximum wasted space per dataBuffer is 2x the largest size class, +// which happens when the dataBuffer has multiple chunks and there is +// one unread byte in both the first and last chunks. We use a few size +// classes to minimize overheads for servers that typically receive very +// small request bodies. +// +// TODO: Benchmark to determine if the pools are necessary. The GC may have +// improved enough that we can instead allocate chunks like this: +// make([]byte, max(16<<10, expectedBytesRemaining)) +var ( + dataChunkSizeClasses = []int{ + 1 << 10, + 2 << 10, + 4 << 10, + 8 << 10, + 16 << 10, + } + dataChunkPools = [...]sync.Pool{ + {New: func() interface{} { return make([]byte, 1<<10) }}, + {New: func() interface{} { return make([]byte, 2<<10) }}, + {New: func() interface{} { return make([]byte, 4<<10) }}, + {New: func() interface{} { return make([]byte, 8<<10) }}, + {New: func() interface{} { return make([]byte, 16<<10) }}, + } +) + +func getDataBufferChunk(size int64) []byte { + i := 0 + for ; i < len(dataChunkSizeClasses)-1; i++ { + if size <= int64(dataChunkSizeClasses[i]) { + break + } + } + return dataChunkPools[i].Get().([]byte) +} + +func putDataBufferChunk(p []byte) { + for i, n := range dataChunkSizeClasses { + if len(p) == n { + dataChunkPools[i].Put(p) + return + } + } + panic(fmt.Sprintf("unexpected buffer len=%v", len(p))) +} + +// dataBuffer is an io.ReadWriter backed by a list of data chunks. +// Each dataBuffer is used to read DATA frames on a single stream. +// The buffer is divided into chunks so the server can limit the +// total memory used by a single connection without limiting the +// request body size on any single stream. +type dataBuffer struct { + chunks [][]byte + r int // next byte to read is chunks[0][r] + w int // next byte to write is chunks[len(chunks)-1][w] + size int // total buffered bytes + expected int64 // we expect at least this many bytes in future Write calls (ignored if <= 0) +} + +var errReadEmpty = errors.New("read from empty dataBuffer") + +// Read copies bytes from the buffer into p. +// It is an error to read when no data is available. +func (b *dataBuffer) Read(p []byte) (int, error) { + if b.size == 0 { + return 0, errReadEmpty + } + var ntotal int + for len(p) > 0 && b.size > 0 { + readFrom := b.bytesFromFirstChunk() + n := copy(p, readFrom) + p = p[n:] + ntotal += n + b.r += n + b.size -= n + // If the first chunk has been consumed, advance to the next chunk. 
+ if b.r == len(b.chunks[0]) { + putDataBufferChunk(b.chunks[0]) + end := len(b.chunks) - 1 + copy(b.chunks[:end], b.chunks[1:]) + b.chunks[end] = nil + b.chunks = b.chunks[:end] + b.r = 0 + } + } + return ntotal, nil +} + +func (b *dataBuffer) bytesFromFirstChunk() []byte { + if len(b.chunks) == 1 { + return b.chunks[0][b.r:b.w] + } + return b.chunks[0][b.r:] +} + +// Len returns the number of bytes of the unread portion of the buffer. +func (b *dataBuffer) Len() int { + return b.size +} + +// Write appends p to the buffer. +func (b *dataBuffer) Write(p []byte) (int, error) { + ntotal := len(p) + for len(p) > 0 { + // If the last chunk is empty, allocate a new chunk. Try to allocate + // enough to fully copy p plus any additional bytes we expect to + // receive. However, this may allocate less than len(p). + want := int64(len(p)) + if b.expected > want { + want = b.expected + } + chunk := b.lastChunkOrAlloc(want) + n := copy(chunk[b.w:], p) + p = p[n:] + b.w += n + b.size += n + b.expected -= int64(n) + } + return ntotal, nil +} + +func (b *dataBuffer) lastChunkOrAlloc(want int64) []byte { + if len(b.chunks) != 0 { + last := b.chunks[len(b.chunks)-1] + if b.w < len(last) { + return last + } + } + chunk := getDataBufferChunk(want) + b.chunks = append(b.chunks, chunk) + b.w = 0 + return chunk +} diff --git a/vendor/golang.org/x/net/http2/errors.go b/vendor/golang.org/x/net/http2/errors.go index 20fd7626a..71f2c4631 100644 --- a/vendor/golang.org/x/net/http2/errors.go +++ b/vendor/golang.org/x/net/http2/errors.go @@ -87,13 +87,16 @@ type goAwayFlowError struct{} func (goAwayFlowError) Error() string { return "connection exceeded flow control window size" } -// connErrorReason wraps a ConnectionError with an informative error about why it occurs. - +// connError represents an HTTP/2 ConnectionError error code, along +// with a string (for debugging) explaining why. +// // Errors of this type are only returned by the frame parser functions -// and converted into ConnectionError(ErrCodeProtocol). +// and converted into ConnectionError(Code), after stashing away +// the Reason into the Framer's errDetail field, accessible via +// the (*Framer).ErrorDetail method. type connError struct { - Code ErrCode - Reason string + Code ErrCode // the ConnectionError error code + Reason string // additional reason } func (e connError) Error() string { diff --git a/vendor/golang.org/x/net/http2/fixed_buffer.go b/vendor/golang.org/x/net/http2/fixed_buffer.go deleted file mode 100644 index 47da0f0bf..000000000 --- a/vendor/golang.org/x/net/http2/fixed_buffer.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2014 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package http2 - -import ( - "errors" -) - -// fixedBuffer is an io.ReadWriter backed by a fixed size buffer. -// It never allocates, but moves old data as new data is written. -type fixedBuffer struct { - buf []byte - r, w int -} - -var ( - errReadEmpty = errors.New("read from empty fixedBuffer") - errWriteFull = errors.New("write on full fixedBuffer") -) - -// Read copies bytes from the buffer into p. -// It is an error to read when no data is available. -func (b *fixedBuffer) Read(p []byte) (n int, err error) { - if b.r == b.w { - return 0, errReadEmpty - } - n = copy(p, b.buf[b.r:b.w]) - b.r += n - if b.r == b.w { - b.r = 0 - b.w = 0 - } - return n, nil -} - -// Len returns the number of bytes of the unread portion of the buffer. 
-func (b *fixedBuffer) Len() int { - return b.w - b.r -} - -// Write copies bytes from p into the buffer. -// It is an error to write more data than the buffer can hold. -func (b *fixedBuffer) Write(p []byte) (n int, err error) { - // Slide existing data to beginning. - if b.r > 0 && len(p) > len(b.buf)-b.w { - copy(b.buf, b.buf[b.r:b.w]) - b.w -= b.r - b.r = 0 - } - - // Write new data. - n = copy(b.buf[b.w:], p) - b.w += n - if n < len(p) { - err = errWriteFull - } - return n, err -} diff --git a/vendor/golang.org/x/net/http2/flow.go b/vendor/golang.org/x/net/http2/flow.go index 957de2542..cea601fcd 100644 --- a/vendor/golang.org/x/net/http2/flow.go +++ b/vendor/golang.org/x/net/http2/flow.go @@ -41,10 +41,10 @@ func (f *flow) take(n int32) { // add adds n bytes (positive or negative) to the flow control window. // It returns false if the sum would exceed 2^31-1. func (f *flow) add(n int32) bool { - remain := (1<<31 - 1) - f.n - if n > remain { - return false + sum := f.n + n + if (sum > n) == (f.n > 0) { + f.n = sum + return true } - f.n += n - return true + return false } diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go index 358833fed..b46791d1d 100644 --- a/vendor/golang.org/x/net/http2/frame.go +++ b/vendor/golang.org/x/net/http2/frame.go @@ -14,8 +14,8 @@ import ( "strings" "sync" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) const frameHeaderLen = 9 @@ -122,7 +122,7 @@ var flagName = map[FrameType]map[Flags]string{ // a frameParser parses a frame given its FrameHeader and payload // bytes. The length of payload will always equal fh.Length (which // might be 0). -type frameParser func(fh FrameHeader, payload []byte) (Frame, error) +type frameParser func(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) var frameParsers = map[FrameType]frameParser{ FrameData: parseDataFrame, @@ -312,7 +312,7 @@ type Framer struct { MaxHeaderListSize uint32 // TODO: track which type of frame & with which flags was sent - // last. Then return an error (unless AllowIllegalWrites) if + // last. Then return an error (unless AllowIllegalWrites) if // we're in the middle of a header block and a // non-Continuation or Continuation on a different stream is // attempted to be written. @@ -323,6 +323,8 @@ type Framer struct { debugFramerBuf *bytes.Buffer debugReadLoggerf func(string, ...interface{}) debugWriteLoggerf func(string, ...interface{}) + + frameCache *frameCache // nil if frames aren't reused (default) } func (fr *Framer) maxHeaderListSize() uint32 { @@ -398,6 +400,27 @@ const ( maxFrameSize = 1<<24 - 1 ) +// SetReuseFrames allows the Framer to reuse Frames. +// If called on a Framer, Frames returned by calls to ReadFrame are only +// valid until the next call to ReadFrame. +func (fr *Framer) SetReuseFrames() { + if fr.frameCache != nil { + return + } + fr.frameCache = &frameCache{} +} + +type frameCache struct { + dataFrame DataFrame +} + +func (fc *frameCache) getDataFrame() *DataFrame { + if fc == nil { + return &DataFrame{} + } + return &fc.dataFrame +} + // NewFramer returns a Framer that writes frames to w and reads them from r. 
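
As an illustrative aside (not part of the vendored diff): the flow.go hunk above replaces the explicit headroom computation with a sign-comparison overflow test. A stand-alone sketch of the same trick, with a couple of worked values in comments, is below; the package and helper names are assumptions for illustration.

    package flowmath // hypothetical package name, illustration only

    // add32 reports whether a+b fits in an int32, using the same sign trick as
    // the rewritten flow.add: when the current window a is positive, a
    // legitimate sum must come out strictly greater than the addend b, whereas
    // a wrapped (overflowed) sum comes out smaller. The symmetric case handles
    // negative overflow.
    func add32(a, b int32) (int32, bool) {
    	sum := a + b
    	if (sum > b) == (a > 0) {
    		return sum, true // e.g. add32(100, 200) == 300, true
    	}
    	return 0, false // e.g. add32(1, 1<<31-1) == 0, false: would exceed 2^31-1
    }
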
func NewFramer(w io.Writer, r io.Reader) *Framer { fr := &Framer{ @@ -477,7 +500,7 @@ func (fr *Framer) ReadFrame() (Frame, error) { if _, err := io.ReadFull(fr.r, payload); err != nil { return nil, err } - f, err := typeFrameParser(fh.Type)(fh, payload) + f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, payload) if err != nil { if ce, ok := err.(connError); ok { return nil, fr.connError(ce.Code, ce.Reason) @@ -565,7 +588,7 @@ func (f *DataFrame) Data() []byte { return f.data } -func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parseDataFrame(fc *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { // DATA frames MUST be associated with a stream. If a // DATA frame is received whose stream identifier @@ -574,9 +597,9 @@ func parseDataFrame(fh FrameHeader, payload []byte) (Frame, error) { // PROTOCOL_ERROR. return nil, connError{ErrCodeProtocol, "DATA frame with stream ID 0"} } - f := &DataFrame{ - FrameHeader: fh, - } + f := fc.getDataFrame() + f.FrameHeader = fh + var padSize byte if fh.Flags.Has(FlagDataPadded) { var err error @@ -600,6 +623,7 @@ var ( errStreamID = errors.New("invalid stream ID") errDepStreamID = errors.New("invalid dependent stream ID") errPadLength = errors.New("pad length too large") + errPadBytes = errors.New("padding bytes must all be zeros unless AllowIllegalWrites is enabled") ) func validStreamIDOrZero(streamID uint32) bool { @@ -623,6 +647,7 @@ func (f *Framer) WriteData(streamID uint32, endStream bool, data []byte) error { // // If pad is nil, the padding bit is not sent. // The length of pad must not exceed 255 bytes. +// The bytes of pad must all be zero, unless f.AllowIllegalWrites is set. // // It will perform exactly one Write to the underlying Writer. // It is the caller's responsibility not to violate the maximum frame size @@ -631,8 +656,18 @@ func (f *Framer) WriteDataPadded(streamID uint32, endStream bool, data, pad []by if !validStreamID(streamID) && !f.AllowIllegalWrites { return errStreamID } - if len(pad) > 255 { - return errPadLength + if len(pad) > 0 { + if len(pad) > 255 { + return errPadLength + } + if !f.AllowIllegalWrites { + for _, b := range pad { + if b != 0 { + // "Padding octets MUST be set to zero when sending." + return errPadBytes + } + } + } } var flags Flags if endStream { @@ -660,10 +695,10 @@ type SettingsFrame struct { p []byte } -func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseSettingsFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.Flags.Has(FlagSettingsAck) && fh.Length > 0 { // When this (ACK 0x1) bit is set, the payload of the - // SETTINGS frame MUST be empty. Receipt of a + // SETTINGS frame MUST be empty. Receipt of a // SETTINGS frame with the ACK flag set and a length // field value other than 0 MUST be treated as a // connection error (Section 5.4.1) of type @@ -672,7 +707,7 @@ func parseSettingsFrame(fh FrameHeader, p []byte) (Frame, error) { } if fh.StreamID != 0 { // SETTINGS frames always apply to a connection, - // never a single stream. The stream identifier for a + // never a single stream. The stream identifier for a // SETTINGS frame MUST be zero (0x0). 
If an endpoint // receives a SETTINGS frame whose stream identifier // field is anything other than 0x0, the endpoint MUST @@ -698,32 +733,67 @@ func (f *SettingsFrame) IsAck() bool { return f.FrameHeader.Flags.Has(FlagSettingsAck) } -func (f *SettingsFrame) Value(s SettingID) (v uint32, ok bool) { +func (f *SettingsFrame) Value(id SettingID) (v uint32, ok bool) { f.checkValid() - buf := f.p - for len(buf) > 0 { - settingID := SettingID(binary.BigEndian.Uint16(buf[:2])) - if settingID == s { - return binary.BigEndian.Uint32(buf[2:6]), true + for i := 0; i < f.NumSettings(); i++ { + if s := f.Setting(i); s.ID == id { + return s.Val, true } - buf = buf[6:] } return 0, false } +// Setting returns the setting from the frame at the given 0-based index. +// The index must be >= 0 and less than f.NumSettings(). +func (f *SettingsFrame) Setting(i int) Setting { + buf := f.p + return Setting{ + ID: SettingID(binary.BigEndian.Uint16(buf[i*6 : i*6+2])), + Val: binary.BigEndian.Uint32(buf[i*6+2 : i*6+6]), + } +} + +func (f *SettingsFrame) NumSettings() int { return len(f.p) / 6 } + +// HasDuplicates reports whether f contains any duplicate setting IDs. +func (f *SettingsFrame) HasDuplicates() bool { + num := f.NumSettings() + if num == 0 { + return false + } + // If it's small enough (the common case), just do the n^2 + // thing and avoid a map allocation. + if num < 10 { + for i := 0; i < num; i++ { + idi := f.Setting(i).ID + for j := i + 1; j < num; j++ { + idj := f.Setting(j).ID + if idi == idj { + return true + } + } + } + return false + } + seen := map[SettingID]bool{} + for i := 0; i < num; i++ { + id := f.Setting(i).ID + if seen[id] { + return true + } + seen[id] = true + } + return false +} + // ForeachSetting runs fn for each setting. // It stops and returns the first error. 
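
As an illustrative aside (not part of the vendored diff): the new SettingsFrame accessors above (Setting, NumSettings, Value, HasDuplicates) lend themselves to a small consumer-side helper. The sketch below shows one possible use, assuming sf came from Framer.ReadFrame; the package name, the error policy on duplicates, and the helper name are this sketch's assumptions.

    package settingsdemo // hypothetical package name, illustration only

    import (
    	"errors"

    	"golang.org/x/net/http2"
    )

    // initialWindowSize rejects duplicate setting IDs up front, then looks up
    // SETTINGS_INITIAL_WINDOW_SIZE, falling back to the RFC 7540 default of 65535.
    func initialWindowSize(sf *http2.SettingsFrame) (uint32, error) {
    	if sf.HasDuplicates() {
    		// Last-value-wins would also be a defensible policy; erroring out
    		// is simply this sketch's choice.
    		return 0, errors.New("SETTINGS frame contains duplicate setting IDs")
    	}
    	if v, ok := sf.Value(http2.SettingInitialWindowSize); ok {
    		return v, nil
    	}
    	return 65535, nil
    }
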
func (f *SettingsFrame) ForeachSetting(fn func(Setting) error) error { f.checkValid() - buf := f.p - for len(buf) > 0 { - if err := fn(Setting{ - SettingID(binary.BigEndian.Uint16(buf[:2])), - binary.BigEndian.Uint32(buf[2:6]), - }); err != nil { + for i := 0; i < f.NumSettings(); i++ { + if err := fn(f.Setting(i)); err != nil { return err } - buf = buf[6:] } return nil } @@ -762,7 +832,7 @@ type PingFrame struct { func (f *PingFrame) IsAck() bool { return f.Flags.Has(FlagPingAck) } -func parsePingFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePingFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if len(payload) != 8 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -802,7 +872,7 @@ func (f *GoAwayFrame) DebugData() []byte { return f.debugData } -func parseGoAwayFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseGoAwayFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID != 0 { return nil, ConnectionError(ErrCodeProtocol) } @@ -842,7 +912,7 @@ func (f *UnknownFrame) Payload() []byte { return f.p } -func parseUnknownFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseUnknownFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { return &UnknownFrame{fh, p}, nil } @@ -853,7 +923,7 @@ type WindowUpdateFrame struct { Increment uint32 // never read with high bit set } -func parseWindowUpdateFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseWindowUpdateFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -918,12 +988,12 @@ func (f *HeadersFrame) HasPriority() bool { return f.FrameHeader.Flags.Has(FlagHeadersPriority) } -func parseHeadersFrame(fh FrameHeader, p []byte) (_ Frame, err error) { +func parseHeadersFrame(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { hf := &HeadersFrame{ FrameHeader: fh, } if fh.StreamID == 0 { - // HEADERS frames MUST be associated with a stream. If a HEADERS frame + // HEADERS frames MUST be associated with a stream. If a HEADERS frame // is received whose stream identifier field is 0x0, the recipient MUST // respond with a connection error (Section 5.4.1) of type // PROTOCOL_ERROR. @@ -1045,7 +1115,7 @@ type PriorityParam struct { Exclusive bool // Weight is the stream's zero-indexed weight. It should be - // set together with StreamDep, or neither should be set. Per + // set together with StreamDep, or neither should be set. Per // the spec, "Add one to the value to obtain a weight between // 1 and 256." 
Weight uint8 @@ -1055,7 +1125,7 @@ func (p PriorityParam) IsZero() bool { return p == PriorityParam{} } -func parsePriorityFrame(fh FrameHeader, payload []byte) (Frame, error) { +func parsePriorityFrame(_ *frameCache, fh FrameHeader, payload []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "PRIORITY frame with stream ID 0"} } @@ -1102,7 +1172,7 @@ type RSTStreamFrame struct { ErrCode ErrCode } -func parseRSTStreamFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseRSTStreamFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if len(p) != 4 { return nil, ConnectionError(ErrCodeFrameSize) } @@ -1132,7 +1202,7 @@ type ContinuationFrame struct { headerFragBuf []byte } -func parseContinuationFrame(fh FrameHeader, p []byte) (Frame, error) { +func parseContinuationFrame(_ *frameCache, fh FrameHeader, p []byte) (Frame, error) { if fh.StreamID == 0 { return nil, connError{ErrCodeProtocol, "CONTINUATION frame with stream ID 0"} } @@ -1182,7 +1252,7 @@ func (f *PushPromiseFrame) HeadersEnded() bool { return f.FrameHeader.Flags.Has(FlagPushPromiseEndHeaders) } -func parsePushPromise(fh FrameHeader, p []byte) (_ Frame, err error) { +func parsePushPromise(_ *frameCache, fh FrameHeader, p []byte) (_ Frame, err error) { pp := &PushPromiseFrame{ FrameHeader: fh, } @@ -1407,7 +1477,7 @@ func (fr *Framer) maxHeaderStringLen() int { } // readMetaFrame returns 0 or more CONTINUATION frames from fr and -// merge them into into the provided hf and returns a MetaHeadersFrame +// merge them into the provided hf and returns a MetaHeadersFrame // with the decoded hpack values. func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if fr.AllowIllegalReads { @@ -1427,7 +1497,7 @@ func (fr *Framer) readMetaFrame(hf *HeadersFrame) (*MetaHeadersFrame, error) { if VerboseLogs && fr.logReads { fr.debugReadLoggerf("http2: decoded hpack field %+v", hf) } - if !httplex.ValidHeaderFieldValue(hf.Value) { + if !httpguts.ValidHeaderFieldValue(hf.Value) { invalid = headerFieldValueError(hf.Value) } isPseudo := strings.HasPrefix(hf.Name, ":") diff --git a/vendor/golang.org/x/net/http2/go111.go b/vendor/golang.org/x/net/http2/go111.go new file mode 100644 index 000000000..3a131016b --- /dev/null +++ b/vendor/golang.org/x/net/http2/go111.go @@ -0,0 +1,29 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { + return trace != nil && trace.WroteHeaderField != nil +} + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) { + if trace != nil && trace.WroteHeaderField != nil { + trace.WroteHeaderField(k, []string{v}) + } +} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + if trace != nil { + return trace.Got1xxResponse + } + return nil +} diff --git a/vendor/golang.org/x/net/http2/go16.go b/vendor/golang.org/x/net/http2/go16.go deleted file mode 100644 index 2b72855f5..000000000 --- a/vendor/golang.org/x/net/http2/go16.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. 
- -// +build go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return t1.ExpectContinueTimeout -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_GCM_SHA256, - tls.TLS_RSA_WITH_AES_256_GCM_SHA384, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/go17.go b/vendor/golang.org/x/net/http2/go17.go deleted file mode 100644 index 47b7fae08..000000000 --- a/vendor/golang.org/x/net/http2/go17.go +++ /dev/null @@ -1,106 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7 - -package http2 - -import ( - "context" - "net" - "net/http" - "net/http/httptrace" - "time" -) - -type contextContext interface { - context.Context -} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - ctx, cancel = context.WithCancel(context.Background()) - ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) - if hs := opts.baseConfig(); hs != nil { - ctx = context.WithValue(ctx, http.ServerContextKey, hs) - } - return -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return context.WithCancel(ctx) -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req.WithContext(ctx) -} - -type clientTrace httptrace.ClientTrace - -func reqContext(r *http.Request) context.Context { return r.Context() } - -func (t *Transport) idleConnTimeout() time.Duration { - if t.t1 != nil { - return t.t1.IdleConnTimeout - } - return 0 -} - -func setResponseUncompressed(res *http.Response) { res.Uncompressed = true } - -func traceGotConn(req *http.Request, cc *ClientConn) { - trace := httptrace.ContextClientTrace(req.Context()) - if trace == nil || trace.GotConn == nil { - return - } - ci := httptrace.GotConnInfo{Conn: cc.tconn} - cc.mu.Lock() - ci.Reused = cc.nextStreamID > 1 - ci.WasIdle = len(cc.streams) == 0 && ci.Reused - if ci.WasIdle && !cc.lastActive.IsZero() { - ci.IdleTime = time.Now().Sub(cc.lastActive) - } - cc.mu.Unlock() - - trace.GotConn(ci) -} - -func traceWroteHeaders(trace *clientTrace) { - if trace != nil && trace.WroteHeaders != nil { - trace.WroteHeaders() - } -} - -func traceGot100Continue(trace *clientTrace) { - if trace != nil && trace.Got100Continue != nil { - trace.Got100Continue() - } -} - -func traceWait100Continue(trace *clientTrace) { - if trace != nil && trace.Wait100Continue != nil { - trace.Wait100Continue() - } -} - -func traceWroteRequest(trace *clientTrace, err error) { - if trace != nil && trace.WroteRequest != nil { - 
trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) - } -} - -func traceFirstResponseByte(trace *clientTrace) { - if trace != nil && trace.GotFirstResponseByte != nil { - trace.GotFirstResponseByte() - } -} - -func requestTrace(req *http.Request) *clientTrace { - trace := httptrace.ContextClientTrace(req.Context()) - return (*clientTrace)(trace) -} - -// Ping sends a PING frame to the server and waits for the ack. -func (cc *ClientConn) Ping(ctx context.Context) error { - return cc.ping(ctx) -} diff --git a/vendor/golang.org/x/net/http2/go17_not18.go b/vendor/golang.org/x/net/http2/go17_not18.go deleted file mode 100644 index b4c52ecec..000000000 --- a/vendor/golang.org/x/net/http2/go17_not18.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.7,!go1.8 - -package http2 - -import "crypto/tls" - -// temporary copy of Go 1.7's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - DynamicRecordSizingDisabled: c.DynamicRecordSizingDisabled, - Renegotiation: c.Renegotiation, - } -} diff --git a/vendor/golang.org/x/net/http2/go18.go b/vendor/golang.org/x/net/http2/go18.go deleted file mode 100644 index 633202c39..000000000 --- a/vendor/golang.org/x/net/http2/go18.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build go1.8 - -package http2 - -import ( - "crypto/tls" - "io" - "net/http" -) - -func cloneTLSConfig(c *tls.Config) *tls.Config { return c.Clone() } - -var _ http.Pusher = (*responseWriter)(nil) - -// Push implements http.Pusher. 
-func (w *responseWriter) Push(target string, opts *http.PushOptions) error { - internalOpts := pushOptions{} - if opts != nil { - internalOpts.Method = opts.Method - internalOpts.Header = opts.Header - } - return w.push(target, internalOpts) -} - -func configureServer18(h1 *http.Server, h2 *Server) error { - if h2.IdleTimeout == 0 { - if h1.IdleTimeout != 0 { - h2.IdleTimeout = h1.IdleTimeout - } else { - h2.IdleTimeout = h1.ReadTimeout - } - } - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil && panicValue != http.ErrAbortHandler -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return req.GetBody -} - -func reqBodyIsNoBody(body io.ReadCloser) bool { - return body == http.NoBody -} diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/http2/headermap.go index c2805f6ac..c3ff3fa1c 100644 --- a/vendor/golang.org/x/net/http2/headermap.go +++ b/vendor/golang.org/x/net/http2/headermap.go @@ -7,15 +7,21 @@ package http2 import ( "net/http" "strings" + "sync" ) var ( - commonLowerHeader = map[string]string{} // Go-Canonical-Case -> lower-case - commonCanonHeader = map[string]string{} // lower-case -> Go-Canonical-Case + commonBuildOnce sync.Once + commonLowerHeader map[string]string // Go-Canonical-Case -> lower-case + commonCanonHeader map[string]string // lower-case -> Go-Canonical-Case ) -func init() { - for _, v := range []string{ +func buildCommonHeaderMapsOnce() { + commonBuildOnce.Do(buildCommonHeaderMaps) +} + +func buildCommonHeaderMaps() { + common := []string{ "accept", "accept-charset", "accept-encoding", @@ -63,7 +69,10 @@ func init() { "vary", "via", "www-authenticate", - } { + } + commonLowerHeader = make(map[string]string, len(common)) + commonCanonHeader = make(map[string]string, len(common)) + for _, v := range common { chk := http.CanonicalHeaderKey(v) commonLowerHeader[chk] = v commonCanonHeader[v] = chk @@ -71,6 +80,7 @@ func init() { } func lowerHeader(v string) string { + buildCommonHeaderMapsOnce() if s, ok := commonLowerHeader[v]; ok { return s } diff --git a/vendor/golang.org/x/net/http2/hpack/encode.go b/vendor/golang.org/x/net/http2/hpack/encode.go index f9bb03398..1565cf270 100644 --- a/vendor/golang.org/x/net/http2/hpack/encode.go +++ b/vendor/golang.org/x/net/http2/hpack/encode.go @@ -39,13 +39,14 @@ func NewEncoder(w io.Writer) *Encoder { tableSizeUpdate: false, w: w, } + e.dynTab.table.init() e.dynTab.setMaxSize(initialHeaderTableSize) return e } // WriteField encodes f into a single Write to e's underlying Writer. // This function may also produce bytes for "Header Table Size Update" -// if necessary. If produced, it is done before encoding f. +// if necessary. If produced, it is done before encoding f. func (e *Encoder) WriteField(f HeaderField) error { e.buf = e.buf[:0] @@ -88,29 +89,17 @@ func (e *Encoder) WriteField(f HeaderField) error { // only name matches, i points to that index and nameValueMatch // becomes false. 
func (e *Encoder) searchTable(f HeaderField) (i uint64, nameValueMatch bool) { - for idx, hf := range staticTable { - if !constantTimeStringCompare(hf.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(idx + 1) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(hf.Value, f.Value) { - continue - } - i = uint64(idx + 1) - nameValueMatch = true - return + i, nameValueMatch = staticTable.search(f) + if nameValueMatch { + return i, true } - j, nameValueMatch := e.dynTab.search(f) + j, nameValueMatch := e.dynTab.table.search(f) if nameValueMatch || (i == 0 && j != 0) { - i = j + uint64(len(staticTable)) + return j + uint64(staticTable.len()), nameValueMatch } - return + + return i, false } // SetMaxDynamicTableSize changes the dynamic header table size to v. @@ -217,7 +206,7 @@ func appendVarInt(dst []byte, n byte, i uint64) []byte { } // appendHpackString appends s, as encoded in "String Literal" -// representation, to dst and returns the the extended buffer. +// representation, to dst and returns the extended buffer. // // s will be encoded in Huffman codes only when it produces strictly // shorter byte string. diff --git a/vendor/golang.org/x/net/http2/hpack/hpack.go b/vendor/golang.org/x/net/http2/hpack/hpack.go index 135b9f62c..85f18a2b0 100644 --- a/vendor/golang.org/x/net/http2/hpack/hpack.go +++ b/vendor/golang.org/x/net/http2/hpack/hpack.go @@ -61,7 +61,7 @@ func (hf HeaderField) String() string { func (hf HeaderField) Size() uint32 { // http://http2.github.io/http2-spec/compression.html#rfc.section.4.1 // "The size of the dynamic table is the sum of the size of - // its entries. The size of an entry is the sum of its name's + // its entries. The size of an entry is the sum of its name's // length in octets (as defined in Section 5.2), its value's // length in octets (see Section 5.2), plus 32. The size of // an entry is calculated using the length of the name and @@ -92,6 +92,8 @@ type Decoder struct { // saveBuf is previous data passed to Write which we weren't able // to fully parse before. Unlike buf, we own this data. saveBuf bytes.Buffer + + firstField bool // processing the first field of the header block } // NewDecoder returns a new decoder with the provided maximum dynamic @@ -101,7 +103,9 @@ func NewDecoder(maxDynamicTableSize uint32, emitFunc func(f HeaderField)) *Decod d := &Decoder{ emit: emitFunc, emitEnabled: true, + firstField: true, } + d.dynTab.table.init() d.dynTab.allowedMaxSize = maxDynamicTableSize d.dynTab.setMaxSize(maxDynamicTableSize) return d @@ -154,12 +158,9 @@ func (d *Decoder) SetAllowedMaxDynamicTableSize(v uint32) { } type dynamicTable struct { - // ents is the FIFO described at // http://http2.github.io/http2-spec/compression.html#rfc.section.2.3.2 - // The newest (low index) is append at the end, and items are - // evicted from the front. - ents []HeaderField - size uint32 + table headerFieldTable + size uint32 // in bytes maxSize uint32 // current maxSize allowedMaxSize uint32 // maxSize may go up to this, inclusive } @@ -169,95 +170,45 @@ func (dt *dynamicTable) setMaxSize(v uint32) { dt.evict() } -// TODO: change dynamicTable to be a struct with a slice and a size int field, -// per http://http2.github.io/http2-spec/compression.html#rfc.section.4.1: -// -// -// Then make add increment the size. maybe the max size should move from Decoder to -// dynamicTable and add should return an ok bool if there was enough space. -// -// Later we'll need a remove operation on dynamicTable. 
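(Illustrative aside, not part of the patch: the entry-size rule quoted above — name length plus value length plus 32 octets — is what the dynamic-table eviction below budgets against. A minimal sketch using the exported HeaderField type, assuming the usual golang.org/x/net/http2/hpack import:)

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

func main() {
	hf := hpack.HeaderField{Name: "cookie", Value: "a=b"}
	// Size() = len("cookie") + len("a=b") + 32 = 6 + 3 + 32 = 41 octets.
	fmt.Println(hf.Size()) // prints 41
}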
- func (dt *dynamicTable) add(f HeaderField) { - dt.ents = append(dt.ents, f) + dt.table.addEntry(f) dt.size += f.Size() dt.evict() } -// If we're too big, evict old stuff (front of the slice) +// If we're too big, evict old stuff. func (dt *dynamicTable) evict() { - base := dt.ents // keep base pointer of slice - for dt.size > dt.maxSize { - dt.size -= dt.ents[0].Size() - dt.ents = dt.ents[1:] - } - - // Shift slice contents down if we evicted things. - if len(dt.ents) != len(base) { - copy(base, dt.ents) - dt.ents = base[:len(dt.ents)] - } -} - -// constantTimeStringCompare compares string a and b in a constant -// time manner. -func constantTimeStringCompare(a, b string) bool { - if len(a) != len(b) { - return false - } - - c := byte(0) - - for i := 0; i < len(a); i++ { - c |= a[i] ^ b[i] + var n int + for dt.size > dt.maxSize && n < dt.table.len() { + dt.size -= dt.table.ents[n].Size() + n++ } - - return c == 0 -} - -// Search searches f in the table. The return value i is 0 if there is -// no name match. If there is name match or name/value match, i is the -// index of that entry (1-based). If both name and value match, -// nameValueMatch becomes true. -func (dt *dynamicTable) search(f HeaderField) (i uint64, nameValueMatch bool) { - l := len(dt.ents) - for j := l - 1; j >= 0; j-- { - ent := dt.ents[j] - if !constantTimeStringCompare(ent.Name, f.Name) { - continue - } - if i == 0 { - i = uint64(l - j) - } - if f.Sensitive { - continue - } - if !constantTimeStringCompare(ent.Value, f.Value) { - continue - } - i = uint64(l - j) - nameValueMatch = true - return - } - return + dt.table.evictOldest(n) } func (d *Decoder) maxTableIndex() int { - return len(d.dynTab.ents) + len(staticTable) + // This should never overflow. RFC 7540 Section 6.5.2 limits the size of + // the dynamic table to 2^32 bytes, where each entry will occupy more than + // one byte. Further, the staticTable has a fixed, small length. + return d.dynTab.table.len() + staticTable.len() } func (d *Decoder) at(i uint64) (hf HeaderField, ok bool) { - if i < 1 { + // See Section 2.3.3. + if i == 0 { return } + if i <= uint64(staticTable.len()) { + return staticTable.ents[i-1], true + } if i > uint64(d.maxTableIndex()) { return } - if i <= uint64(len(staticTable)) { - return staticTable[i-1], true - } - dents := d.dynTab.ents - return dents[len(dents)-(int(i)-len(staticTable))], true + // In the dynamic table, newer entries have lower indices. + // However, dt.ents[0] is the oldest entry. Hence, dt.ents is + // the reversed dynamic table. + dt := d.dynTab.table + return dt.ents[dt.len()-(int(i)-staticTable.len())], true } // Decode decodes an entire block. @@ -278,11 +229,15 @@ func (d *Decoder) DecodeFull(p []byte) ([]HeaderField, error) { return hf, nil } +// Close declares that the decoding is complete and resets the Decoder +// to be reused again for a new header block. If there is any remaining +// data in the decoder's buffer, Close returns an error. func (d *Decoder) Close() error { if d.saveBuf.Len() > 0 { d.saveBuf.Reset() return DecodingError{errors.New("truncated headers")} } + d.firstField = true return nil } @@ -307,7 +262,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { err = d.parseHeaderFieldRepr() if err == errNeedMore { // Extra paranoia, making sure saveBuf won't - // get too large. All the varint and string + // get too large. All the varint and string // reading code earlier should already catch // overlong things and return ErrStringLength, // but keep this as a last resort. 
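(Illustrative aside, not part of the patch: the encoder and decoder changes above are internal; the package's exported API is unchanged. A minimal, hypothetical encode/decode round trip through that API, with placeholder header values:)

package main

import (
	"bytes"
	"fmt"
	"log"

	"golang.org/x/net/http2/hpack"
)

func main() {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	// WriteField may first emit a dynamic table size update, then the field itself.
	if err := enc.WriteField(hpack.HeaderField{Name: ":method", Value: "GET"}); err != nil {
		log.Fatal(err)
	}
	if err := enc.WriteField(hpack.HeaderField{Name: "user-agent", Value: "example"}); err != nil {
		log.Fatal(err)
	}

	dec := hpack.NewDecoder(4096, func(f hpack.HeaderField) {
		fmt.Printf("decoded %s: %s\n", f.Name, f.Value)
	})
	if _, err := dec.Write(buf.Bytes()); err != nil {
		log.Fatal(err)
	}
	// Close errors if a partial header field is still buffered (truncated block).
	if err := dec.Close(); err != nil {
		log.Fatal(err)
	}
}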
@@ -318,6 +273,7 @@ func (d *Decoder) Write(p []byte) (n int, err error) { d.saveBuf.Write(d.buf) return len(p), nil } + d.firstField = false if err != nil { break } @@ -441,6 +397,12 @@ func (d *Decoder) callEmit(hf HeaderField) error { // (same invariants and behavior as parseHeaderFieldRepr) func (d *Decoder) parseDynamicTableSizeUpdate() error { + // RFC 7541, sec 4.2: This dynamic table size update MUST occur at the + // beginning of the first header block following the change to the dynamic table size. + if !d.firstField && d.dynTab.size > 0 { + return DecodingError{errors.New("dynamic table size update MUST occur at the beginning of a header block")} + } + buf := d.buf size, buf, err := readVarInt(5, buf) if err != nil { diff --git a/vendor/golang.org/x/net/http2/hpack/huffman.go b/vendor/golang.org/x/net/http2/hpack/huffman.go index 8850e3946..b412a96c5 100644 --- a/vendor/golang.org/x/net/http2/hpack/huffman.go +++ b/vendor/golang.org/x/net/http2/hpack/huffman.go @@ -47,6 +47,7 @@ var ErrInvalidHuffman = errors.New("hpack: invalid Huffman-encoded data") // If maxLen is greater than 0, attempts to write more to buf than // maxLen bytes will return ErrStringLength. func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { + rootHuffmanNode := getRootHuffmanNode() n := rootHuffmanNode // cur is the bit buffer that has not been fed into n. // cbits is the number of low order bits in cur that are valid. @@ -106,7 +107,7 @@ func huffmanDecode(buf *bytes.Buffer, maxLen int, v []byte) error { type node struct { // children is non-nil for internal nodes - children []*node + children *[256]*node // The following are only valid if children is nil: codeLen uint8 // number of bits that led to the output of sym @@ -114,22 +115,31 @@ type node struct { } func newInternalNode() *node { - return &node{children: make([]*node, 256)} + return &node{children: new([256]*node)} } -var rootHuffmanNode = newInternalNode() +var ( + buildRootOnce sync.Once + lazyRootHuffmanNode *node +) + +func getRootHuffmanNode() *node { + buildRootOnce.Do(buildRootHuffmanNode) + return lazyRootHuffmanNode +} -func init() { +func buildRootHuffmanNode() { if len(huffmanCodes) != 256 { panic("unexpected size") } + lazyRootHuffmanNode = newInternalNode() for i, code := range huffmanCodes { addDecoderNode(byte(i), code, huffmanCodeLen[i]) } } func addDecoderNode(sym byte, code uint32, codeLen uint8) { - cur := rootHuffmanNode + cur := lazyRootHuffmanNode for codeLen > 8 { codeLen -= 8 i := uint8(code >> codeLen) diff --git a/vendor/golang.org/x/net/http2/hpack/tables.go b/vendor/golang.org/x/net/http2/hpack/tables.go index b9283a023..a66cfbea6 100644 --- a/vendor/golang.org/x/net/http2/hpack/tables.go +++ b/vendor/golang.org/x/net/http2/hpack/tables.go @@ -4,73 +4,200 @@ package hpack -func pair(name, value string) HeaderField { - return HeaderField{Name: name, Value: value} +import ( + "fmt" +) + +// headerFieldTable implements a list of HeaderFields. +// This is used to implement the static and dynamic tables. +type headerFieldTable struct { + // For static tables, entries are never evicted. + // + // For dynamic tables, entries are evicted from ents[0] and added to the end. + // Each entry has a unique id that starts at one and increments for each + // entry that is added. This unique id is stable across evictions, meaning + // it can be used as a pointer to a specific entry. As in hpack, unique ids + // are 1-based. The unique id for ents[k] is k + evictCount + 1. + // + // Zero is not a valid unique id. 
+ // + // evictCount should not overflow in any remotely practical situation. In + // practice, we will have one dynamic table per HTTP/2 connection. If we + // assume a very powerful server that handles 1M QPS per connection and each + // request adds (then evicts) 100 entries from the table, it would still take + // 2M years for evictCount to overflow. + ents []HeaderField + evictCount uint64 + + // byName maps a HeaderField name to the unique id of the newest entry with + // the same name. See above for a definition of "unique id". + byName map[string]uint64 + + // byNameValue maps a HeaderField name/value pair to the unique id of the newest + // entry with the same name and value. See above for a definition of "unique id". + byNameValue map[pairNameValue]uint64 +} + +type pairNameValue struct { + name, value string +} + +func (t *headerFieldTable) init() { + t.byName = make(map[string]uint64) + t.byNameValue = make(map[pairNameValue]uint64) +} + +// len reports the number of entries in the table. +func (t *headerFieldTable) len() int { + return len(t.ents) +} + +// addEntry adds a new entry. +func (t *headerFieldTable) addEntry(f HeaderField) { + id := uint64(t.len()) + t.evictCount + 1 + t.byName[f.Name] = id + t.byNameValue[pairNameValue{f.Name, f.Value}] = id + t.ents = append(t.ents, f) +} + +// evictOldest evicts the n oldest entries in the table. +func (t *headerFieldTable) evictOldest(n int) { + if n > t.len() { + panic(fmt.Sprintf("evictOldest(%v) on table with %v entries", n, t.len())) + } + for k := 0; k < n; k++ { + f := t.ents[k] + id := t.evictCount + uint64(k) + 1 + if t.byName[f.Name] == id { + delete(t.byName, f.Name) + } + if p := (pairNameValue{f.Name, f.Value}); t.byNameValue[p] == id { + delete(t.byNameValue, p) + } + } + copy(t.ents, t.ents[n:]) + for k := t.len() - n; k < t.len(); k++ { + t.ents[k] = HeaderField{} // so strings can be garbage collected + } + t.ents = t.ents[:t.len()-n] + if t.evictCount+uint64(n) < t.evictCount { + panic("evictCount overflow") + } + t.evictCount += uint64(n) +} + +// search finds f in the table. If there is no match, i is 0. +// If both name and value match, i is the matched index and nameValueMatch +// becomes true. If only name matches, i points to that index and +// nameValueMatch becomes false. +// +// The returned index is a 1-based HPACK index. For dynamic tables, HPACK says +// that index 1 should be the newest entry, but t.ents[0] is the oldest entry, +// meaning t.ents is reversed for dynamic tables. Hence, when t is a dynamic +// table, the return value i actually refers to the entry t.ents[t.len()-i]. +// +// All tables are assumed to be a dynamic tables except for the global +// staticTable pointer. +// +// See Section 2.3.3. +func (t *headerFieldTable) search(f HeaderField) (i uint64, nameValueMatch bool) { + if !f.Sensitive { + if id := t.byNameValue[pairNameValue{f.Name, f.Value}]; id != 0 { + return t.idToIndex(id), true + } + } + if id := t.byName[f.Name]; id != 0 { + return t.idToIndex(id), false + } + return 0, false +} + +// idToIndex converts a unique id to an HPACK index. +// See Section 2.3.3. 
+func (t *headerFieldTable) idToIndex(id uint64) uint64 { + if id <= t.evictCount { + panic(fmt.Sprintf("id (%v) <= evictCount (%v)", id, t.evictCount)) + } + k := id - t.evictCount - 1 // convert id to an index t.ents[k] + if t != staticTable { + return uint64(t.len()) - k // dynamic table + } + return k + 1 } // http://tools.ietf.org/html/draft-ietf-httpbis-header-compression-07#appendix-B -var staticTable = [...]HeaderField{ - pair(":authority", ""), // index 1 (1-based) - pair(":method", "GET"), - pair(":method", "POST"), - pair(":path", "/"), - pair(":path", "/index.html"), - pair(":scheme", "http"), - pair(":scheme", "https"), - pair(":status", "200"), - pair(":status", "204"), - pair(":status", "206"), - pair(":status", "304"), - pair(":status", "400"), - pair(":status", "404"), - pair(":status", "500"), - pair("accept-charset", ""), - pair("accept-encoding", "gzip, deflate"), - pair("accept-language", ""), - pair("accept-ranges", ""), - pair("accept", ""), - pair("access-control-allow-origin", ""), - pair("age", ""), - pair("allow", ""), - pair("authorization", ""), - pair("cache-control", ""), - pair("content-disposition", ""), - pair("content-encoding", ""), - pair("content-language", ""), - pair("content-length", ""), - pair("content-location", ""), - pair("content-range", ""), - pair("content-type", ""), - pair("cookie", ""), - pair("date", ""), - pair("etag", ""), - pair("expect", ""), - pair("expires", ""), - pair("from", ""), - pair("host", ""), - pair("if-match", ""), - pair("if-modified-since", ""), - pair("if-none-match", ""), - pair("if-range", ""), - pair("if-unmodified-since", ""), - pair("last-modified", ""), - pair("link", ""), - pair("location", ""), - pair("max-forwards", ""), - pair("proxy-authenticate", ""), - pair("proxy-authorization", ""), - pair("range", ""), - pair("referer", ""), - pair("refresh", ""), - pair("retry-after", ""), - pair("server", ""), - pair("set-cookie", ""), - pair("strict-transport-security", ""), - pair("transfer-encoding", ""), - pair("user-agent", ""), - pair("vary", ""), - pair("via", ""), - pair("www-authenticate", ""), +var staticTable = newStaticTable() +var staticTableEntries = [...]HeaderField{ + {Name: ":authority"}, + {Name: ":method", Value: "GET"}, + {Name: ":method", Value: "POST"}, + {Name: ":path", Value: "/"}, + {Name: ":path", Value: "/index.html"}, + {Name: ":scheme", Value: "http"}, + {Name: ":scheme", Value: "https"}, + {Name: ":status", Value: "200"}, + {Name: ":status", Value: "204"}, + {Name: ":status", Value: "206"}, + {Name: ":status", Value: "304"}, + {Name: ":status", Value: "400"}, + {Name: ":status", Value: "404"}, + {Name: ":status", Value: "500"}, + {Name: "accept-charset"}, + {Name: "accept-encoding", Value: "gzip, deflate"}, + {Name: "accept-language"}, + {Name: "accept-ranges"}, + {Name: "accept"}, + {Name: "access-control-allow-origin"}, + {Name: "age"}, + {Name: "allow"}, + {Name: "authorization"}, + {Name: "cache-control"}, + {Name: "content-disposition"}, + {Name: "content-encoding"}, + {Name: "content-language"}, + {Name: "content-length"}, + {Name: "content-location"}, + {Name: "content-range"}, + {Name: "content-type"}, + {Name: "cookie"}, + {Name: "date"}, + {Name: "etag"}, + {Name: "expect"}, + {Name: "expires"}, + {Name: "from"}, + {Name: "host"}, + {Name: "if-match"}, + {Name: "if-modified-since"}, + {Name: "if-none-match"}, + {Name: "if-range"}, + {Name: "if-unmodified-since"}, + {Name: "last-modified"}, + {Name: "link"}, + {Name: "location"}, + {Name: "max-forwards"}, + {Name: 
"proxy-authenticate"}, + {Name: "proxy-authorization"}, + {Name: "range"}, + {Name: "referer"}, + {Name: "refresh"}, + {Name: "retry-after"}, + {Name: "server"}, + {Name: "set-cookie"}, + {Name: "strict-transport-security"}, + {Name: "transfer-encoding"}, + {Name: "user-agent"}, + {Name: "vary"}, + {Name: "via"}, + {Name: "www-authenticate"}, +} + +func newStaticTable() *headerFieldTable { + t := &headerFieldTable{} + t.init() + for _, e := range staticTableEntries[:] { + t.addEntry(e) + } + return t } var huffmanCodes = [256]uint32{ diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go index b6b0f9ad1..bdaba1d46 100644 --- a/vendor/golang.org/x/net/http2/http2.go +++ b/vendor/golang.org/x/net/http2/http2.go @@ -29,7 +29,7 @@ import ( "strings" "sync" - "golang.org/x/net/lex/httplex" + "golang.org/x/net/http/httpguts" ) var ( @@ -179,7 +179,7 @@ var ( ) // validWireHeaderFieldName reports whether v is a valid header field -// name (key). See httplex.ValidHeaderName for the base rules. +// name (key). See httpguts.ValidHeaderName for the base rules. // // Further, http2 says: // "Just as in HTTP/1.x, header field names are strings of ASCII @@ -191,7 +191,7 @@ func validWireHeaderFieldName(v string) bool { return false } for _, r := range v { - if !httplex.IsTokenRune(r) { + if !httpguts.IsTokenRune(r) { return false } if 'A' <= r && r <= 'Z' { @@ -201,19 +201,12 @@ func validWireHeaderFieldName(v string) bool { return true } -var httpCodeStringCommon = map[int]string{} // n -> strconv.Itoa(n) - -func init() { - for i := 100; i <= 999; i++ { - if v := http.StatusText(i); v != "" { - httpCodeStringCommon[i] = strconv.Itoa(i) - } - } -} - func httpCodeString(code int) string { - if s, ok := httpCodeStringCommon[code]; ok { - return s + switch code { + case 200: + return "200" + case 404: + return "404" } return strconv.Itoa(code) } @@ -312,7 +305,7 @@ func mustUint31(v int32) uint32 { } // bodyAllowedForStatus reports whether a given response status code -// permits a body. See RFC 2616, section 4.4. +// permits a body. See RFC 7230, section 3.3. func bodyAllowedForStatus(status int) bool { switch { case status >= 100 && status <= 199: @@ -376,12 +369,16 @@ func (s *sorter) SortStrings(ss []string) { // validPseudoPath reports whether v is a valid :path pseudo-header // value. It must be either: // -// *) a non-empty string starting with '/', but not with with "//", +// *) a non-empty string starting with '/' // *) the string '*', for OPTIONS requests. // // For now this is only used a quick check for deciding when to clean // up Opaque URLs before sending requests from the Transport. // See golang.org/issue/16847 +// +// We used to enforce that the path also didn't start with "//", but +// Google's GFE accepts such paths and Chrome sends them, so ignore +// that part of the spec. See golang.org/issue/19103. func validPseudoPath(v string) bool { - return (len(v) > 0 && v[0] == '/' && (len(v) == 1 || v[1] != '/')) || v == "*" + return (len(v) > 0 && v[0] == '/') || v == "*" } diff --git a/vendor/golang.org/x/net/http2/not_go111.go b/vendor/golang.org/x/net/http2/not_go111.go new file mode 100644 index 000000000..161bca7ce --- /dev/null +++ b/vendor/golang.org/x/net/http2/not_go111.go @@ -0,0 +1,20 @@ +// Copyright 2018 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !go1.11 + +package http2 + +import ( + "net/http/httptrace" + "net/textproto" +) + +func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool { return false } + +func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {} + +func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error { + return nil +} diff --git a/vendor/golang.org/x/net/http2/not_go16.go b/vendor/golang.org/x/net/http2/not_go16.go deleted file mode 100644 index efd2e1282..000000000 --- a/vendor/golang.org/x/net/http2/not_go16.go +++ /dev/null @@ -1,46 +0,0 @@ -// Copyright 2015 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.6 - -package http2 - -import ( - "crypto/tls" - "net/http" - "time" -) - -func configureTransport(t1 *http.Transport) (*Transport, error) { - return nil, errTransportVersion -} - -func transportExpectContinueTimeout(t1 *http.Transport) time.Duration { - return 0 - -} - -// isBadCipher reports whether the cipher is blacklisted by the HTTP/2 spec. -func isBadCipher(cipher uint16) bool { - switch cipher { - case tls.TLS_RSA_WITH_RC4_128_SHA, - tls.TLS_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_RSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_ECDSA_WITH_AES_256_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_RC4_128_SHA, - tls.TLS_ECDHE_RSA_WITH_3DES_EDE_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA, - tls.TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA: - // Reject cipher suites from Appendix A. - // "This list includes those cipher suites that do not - // offer an ephemeral key exchange and those that are - // based on the TLS null, stream or block cipher type" - return true - default: - return false - } -} diff --git a/vendor/golang.org/x/net/http2/not_go17.go b/vendor/golang.org/x/net/http2/not_go17.go deleted file mode 100644 index 140434a79..000000000 --- a/vendor/golang.org/x/net/http2/not_go17.go +++ /dev/null @@ -1,87 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.7 - -package http2 - -import ( - "crypto/tls" - "net" - "net/http" - "time" -) - -type contextContext interface { - Done() <-chan struct{} - Err() error -} - -type fakeContext struct{} - -func (fakeContext) Done() <-chan struct{} { return nil } -func (fakeContext) Err() error { panic("should not be called") } - -func reqContext(r *http.Request) fakeContext { - return fakeContext{} -} - -func setResponseUncompressed(res *http.Response) { - // Nothing. 
-} - -type clientTrace struct{} - -func requestTrace(*http.Request) *clientTrace { return nil } -func traceGotConn(*http.Request, *ClientConn) {} -func traceFirstResponseByte(*clientTrace) {} -func traceWroteHeaders(*clientTrace) {} -func traceWroteRequest(*clientTrace, error) {} -func traceGot100Continue(trace *clientTrace) {} -func traceWait100Continue(trace *clientTrace) {} - -func nop() {} - -func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx contextContext, cancel func()) { - return nil, nop -} - -func contextWithCancel(ctx contextContext) (_ contextContext, cancel func()) { - return ctx, nop -} - -func requestWithContext(req *http.Request, ctx contextContext) *http.Request { - return req -} - -// temporary copy of Go 1.6's private tls.Config.clone: -func cloneTLSConfig(c *tls.Config) *tls.Config { - return &tls.Config{ - Rand: c.Rand, - Time: c.Time, - Certificates: c.Certificates, - NameToCertificate: c.NameToCertificate, - GetCertificate: c.GetCertificate, - RootCAs: c.RootCAs, - NextProtos: c.NextProtos, - ServerName: c.ServerName, - ClientAuth: c.ClientAuth, - ClientCAs: c.ClientCAs, - InsecureSkipVerify: c.InsecureSkipVerify, - CipherSuites: c.CipherSuites, - PreferServerCipherSuites: c.PreferServerCipherSuites, - SessionTicketsDisabled: c.SessionTicketsDisabled, - SessionTicketKey: c.SessionTicketKey, - ClientSessionCache: c.ClientSessionCache, - MinVersion: c.MinVersion, - MaxVersion: c.MaxVersion, - CurvePreferences: c.CurvePreferences, - } -} - -func (cc *ClientConn) Ping(ctx contextContext) error { - return cc.ping(ctx) -} - -func (t *Transport) idleConnTimeout() time.Duration { return 0 } diff --git a/vendor/golang.org/x/net/http2/not_go18.go b/vendor/golang.org/x/net/http2/not_go18.go deleted file mode 100644 index efbf83c32..000000000 --- a/vendor/golang.org/x/net/http2/not_go18.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2016 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// +build !go1.8 - -package http2 - -import ( - "io" - "net/http" -) - -func configureServer18(h1 *http.Server, h2 *Server) error { - // No IdleTimeout to sync prior to Go 1.8. - return nil -} - -func shouldLogPanic(panicValue interface{}) bool { - return panicValue != nil -} - -func reqGetBody(req *http.Request) func() (io.ReadCloser, error) { - return nil -} - -func reqBodyIsNoBody(io.ReadCloser) bool { return false } diff --git a/vendor/golang.org/x/net/http2/pipe.go b/vendor/golang.org/x/net/http2/pipe.go index 53b7a1daf..a6140099c 100644 --- a/vendor/golang.org/x/net/http2/pipe.go +++ b/vendor/golang.org/x/net/http2/pipe.go @@ -10,13 +10,13 @@ import ( "sync" ) -// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like +// pipe is a goroutine-safe io.Reader/io.Writer pair. It's like // io.Pipe except there are no PipeReader/PipeWriter halves, and the // underlying buffer is an interface. (io.Pipe is always unbuffered) type pipe struct { mu sync.Mutex - c sync.Cond // c.L lazily initialized to &p.mu - b pipeBuffer + c sync.Cond // c.L lazily initialized to &p.mu + b pipeBuffer // nil when done reading err error // read error once empty. non-nil means closed. 
breakErr error // immediate read error (caller doesn't see rest of b) donec chan struct{} // closed on error @@ -32,6 +32,9 @@ type pipeBuffer interface { func (p *pipe) Len() int { p.mu.Lock() defer p.mu.Unlock() + if p.b == nil { + return 0 + } return p.b.Len() } @@ -47,7 +50,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { if p.breakErr != nil { return 0, p.breakErr } - if p.b.Len() > 0 { + if p.b != nil && p.b.Len() > 0 { return p.b.Read(d) } if p.err != nil { @@ -55,6 +58,7 @@ func (p *pipe) Read(d []byte) (n int, err error) { p.readFn() // e.g. copy trailers p.readFn = nil // not sticky like p.err } + p.b = nil return 0, p.err } p.c.Wait() @@ -75,6 +79,9 @@ func (p *pipe) Write(d []byte) (n int, err error) { if p.err != nil { return 0, errClosedPipeWrite } + if p.breakErr != nil { + return len(d), nil // discard when there is no reader + } return p.b.Write(d) } @@ -109,6 +116,9 @@ func (p *pipe) closeWithError(dst *error, err error, fn func()) { return } p.readFn = fn + if dst == &p.breakErr { + p.b = nil + } *dst = err p.closeDoneLocked() } diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go index 3c6b90ccd..b57b6e2d0 100644 --- a/vendor/golang.org/x/net/http2/server.go +++ b/vendor/golang.org/x/net/http2/server.go @@ -28,6 +28,7 @@ package http2 import ( "bufio" "bytes" + "context" "crypto/tls" "errors" "fmt" @@ -46,6 +47,7 @@ import ( "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" ) @@ -110,9 +112,41 @@ type Server struct { // activity for the purposes of IdleTimeout. IdleTimeout time.Duration + // MaxUploadBufferPerConnection is the size of the initial flow + // control window for each connections. The HTTP/2 spec does not + // allow this to be smaller than 65535 or larger than 2^32-1. + // If the value is outside this range, a default value will be + // used instead. + MaxUploadBufferPerConnection int32 + + // MaxUploadBufferPerStream is the size of the initial flow control + // window for each stream. The HTTP/2 spec does not allow this to + // be larger than 2^32-1. If the value is zero or larger than the + // maximum, a default value will be used instead. + MaxUploadBufferPerStream int32 + // NewWriteScheduler constructs a write scheduler for a connection. // If nil, a default scheduler is chosen. NewWriteScheduler func() WriteScheduler + + // Internal state. This is a pointer (rather than embedded directly) + // so that we don't embed a Mutex in this struct, which will make the + // struct non-copyable, which might break some callers. 
+ state *serverInternalState +} + +func (s *Server) initialConnRecvWindowSize() int32 { + if s.MaxUploadBufferPerConnection > initialWindowSize { + return s.MaxUploadBufferPerConnection + } + return 1 << 20 +} + +func (s *Server) initialStreamRecvWindowSize() int32 { + if s.MaxUploadBufferPerStream > 0 { + return s.MaxUploadBufferPerStream + } + return 1 << 20 } func (s *Server) maxReadFrameSize() uint32 { @@ -129,6 +163,40 @@ func (s *Server) maxConcurrentStreams() uint32 { return defaultMaxStreams } +type serverInternalState struct { + mu sync.Mutex + activeConns map[*serverConn]struct{} +} + +func (s *serverInternalState) registerConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + s.activeConns[sc] = struct{}{} + s.mu.Unlock() +} + +func (s *serverInternalState) unregisterConn(sc *serverConn) { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + delete(s.activeConns, sc) + s.mu.Unlock() +} + +func (s *serverInternalState) startGracefulShutdown() { + if s == nil { + return // if the Server was used without calling ConfigureServer + } + s.mu.Lock() + for sc := range s.activeConns { + sc.startGracefulShutdown() + } + s.mu.Unlock() +} + // ConfigureServer adds HTTP/2 support to a net/http Server. // // The configuration conf may be nil. @@ -141,21 +209,30 @@ func ConfigureServer(s *http.Server, conf *Server) error { if conf == nil { conf = new(Server) } - if err := configureServer18(s, conf); err != nil { - return err + conf.state = &serverInternalState{activeConns: make(map[*serverConn]struct{})} + if h1, h2 := s, conf; h2.IdleTimeout == 0 { + if h1.IdleTimeout != 0 { + h2.IdleTimeout = h1.IdleTimeout + } else { + h2.IdleTimeout = h1.ReadTimeout + } } + s.RegisterOnShutdown(conf.state.startGracefulShutdown) if s.TLSConfig == nil { s.TLSConfig = new(tls.Config) } else if s.TLSConfig.CipherSuites != nil { // If they already provided a CipherSuite list, return // an error if it has a bad order or is missing - // ECDHE_RSA_WITH_AES_128_GCM_SHA256. - const requiredCipher = tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 + // ECDHE_RSA_WITH_AES_128_GCM_SHA256 or ECDHE_ECDSA_WITH_AES_128_GCM_SHA256. haveRequired := false sawBad := false for i, cs := range s.TLSConfig.CipherSuites { - if cs == requiredCipher { + switch cs { + case tls.TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256, + // Alternative MTI cipher to not discourage ECDSA-only servers. + // See http://golang.org/cl/30721 for further information. 
+ tls.TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256: haveRequired = true } if isBadCipher(cs) { @@ -165,7 +242,7 @@ func ConfigureServer(s *http.Server, conf *Server) error { } } if !haveRequired { - return fmt.Errorf("http2: TLSConfig.CipherSuites is missing HTTP/2-required TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256") + return fmt.Errorf("http2: TLSConfig.CipherSuites is missing an HTTP/2-required AES_128_GCM_SHA256 cipher.") } } @@ -255,35 +332,37 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { defer cancel() sc := &serverConn{ - srv: s, - hs: opts.baseConfig(), - conn: c, - baseCtx: baseCtx, - remoteAddrStr: c.RemoteAddr().String(), - bw: newBufferedWriter(c), - handler: opts.handler(), - streams: make(map[uint32]*stream), - readFrameCh: make(chan readFrameResult), - wantWriteFrameCh: make(chan FrameWriteRequest, 8), - wantStartPushCh: make(chan startPushRequest, 8), - wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync - bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way - doneServing: make(chan struct{}), - clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" - advMaxStreams: s.maxConcurrentStreams(), - initialWindowSize: initialWindowSize, - maxFrameSize: initialMaxFrameSize, - headerTableSize: initialHeaderTableSize, - serveG: newGoroutineLock(), - pushEnabled: true, - } + srv: s, + hs: opts.baseConfig(), + conn: c, + baseCtx: baseCtx, + remoteAddrStr: c.RemoteAddr().String(), + bw: newBufferedWriter(c), + handler: opts.handler(), + streams: make(map[uint32]*stream), + readFrameCh: make(chan readFrameResult), + wantWriteFrameCh: make(chan FrameWriteRequest, 8), + serveMsgCh: make(chan interface{}, 8), + wroteFrameCh: make(chan frameWriteResult, 1), // buffered; one send in writeFrameAsync + bodyReadCh: make(chan bodyReadMsg), // buffering doesn't matter either way + doneServing: make(chan struct{}), + clientMaxStreams: math.MaxUint32, // Section 6.5.2: "Initially, there is no limit to this value" + advMaxStreams: s.maxConcurrentStreams(), + initialStreamSendWindowSize: initialWindowSize, + maxFrameSize: initialMaxFrameSize, + headerTableSize: initialHeaderTableSize, + serveG: newGoroutineLock(), + pushEnabled: true, + } + + s.state.registerConn(sc) + defer s.state.unregisterConn(sc) // The net/http package sets the write deadline from the // http.Server.WriteTimeout during the TLS handshake, but then - // passes the connection off to us with the deadline already - // set. Disarm it here so that it is not applied to additional - // streams opened on this connection. - // TODO: implement WriteTimeout fully. See Issue 18437. + // passes the connection off to us with the deadline already set. + // Write deadlines are set per stream in serverConn.newStream. + // Disarm the net.Conn write deadline here. if sc.hs.WriteTimeout != 0 { sc.conn.SetWriteDeadline(time.Time{}) } @@ -294,6 +373,9 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.writeSched = NewRandomWriteScheduler() } + // These start at the RFC-specified defaults. If there is a higher + // configured value for inflow, that will be updated when we send a + // WINDOW_UPDATE shortly after sending SETTINGS. sc.flow.add(initialWindowSize) sc.inflow.add(initialWindowSize) sc.hpackEncoder = hpack.NewEncoder(&sc.headerWriteBuf) @@ -328,7 +410,7 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { // addresses during development. // // TODO: optionally enforce? 
Or enforce at the time we receive - // a new request, and verify the the ServerName matches the :authority? + // a new request, and verify the ServerName matches the :authority? // But that precludes proxy situations, perhaps. // // So for now, do nothing here again. @@ -356,6 +438,15 @@ func (s *Server) ServeConn(c net.Conn, opts *ServeConnOpts) { sc.serve() } +func serverConnBaseContext(c net.Conn, opts *ServeConnOpts) (ctx context.Context, cancel func()) { + ctx, cancel = context.WithCancel(context.Background()) + ctx = context.WithValue(ctx, http.LocalAddrContextKey, c.LocalAddr()) + if hs := opts.baseConfig(); hs != nil { + ctx = context.WithValue(ctx, http.ServerContextKey, hs) + } + return +} + func (sc *serverConn) rejectConn(err ErrCode, debug string) { sc.vlogf("http2: server rejecting conn: %v, %s", err, debug) // ignoring errors. hanging up anyway. @@ -371,15 +462,14 @@ type serverConn struct { conn net.Conn bw *bufferedWriter // writing to conn handler http.Handler - baseCtx contextContext + baseCtx context.Context framer *Framer doneServing chan struct{} // closed when serverConn.serve ends readFrameCh chan readFrameResult // written by serverConn.readFrames wantWriteFrameCh chan FrameWriteRequest // from handlers -> serve - wantStartPushCh chan startPushRequest // from handlers -> serve wroteFrameCh chan frameWriteResult // from writeFrameAsync -> serve, tickles more frame writes bodyReadCh chan bodyReadMsg // from handlers -> serve - testHookCh chan func(int) // code to run on the serve loop + serveMsgCh chan interface{} // misc messages & code to send to / run on the serve loop flow flow // conn-wide (not stream-specific) outbound flow control inflow flow // conn-wide inbound flow control tlsState *tls.ConnectionState // shared by all handlers, like net/http @@ -387,38 +477,39 @@ type serverConn struct { writeSched WriteScheduler // Everything following is owned by the serve loop; use serveG.check(): - serveG goroutineLock // used to verify funcs are on serve() - pushEnabled bool - sawFirstSettings bool // got the initial SETTINGS frame after the preface - needToSendSettingsAck bool - unackedSettings int // how many SETTINGS have we sent without ACKs? 
- clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) - advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client - curClientStreams uint32 // number of open streams initiated by the client - curPushedStreams uint32 // number of open streams initiated by server push - maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests - maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes - streams map[uint32]*stream - initialWindowSize int32 - maxFrameSize int32 - headerTableSize uint32 - peerMaxHeaderListSize uint32 // zero means unknown (default) - canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case - writingFrame bool // started writing a frame (on serve goroutine or separate) - writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh - needsFrameFlush bool // last frame write wasn't a flush - inGoAway bool // we've started to or sent GOAWAY - inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop - needToSendGoAway bool // we need to schedule a GOAWAY frame write - goAwayCode ErrCode - shutdownTimerCh <-chan time.Time // nil until used - shutdownTimer *time.Timer // nil until used - idleTimer *time.Timer // nil if unused - idleTimerCh <-chan time.Time // nil if unused + serveG goroutineLock // used to verify funcs are on serve() + pushEnabled bool + sawFirstSettings bool // got the initial SETTINGS frame after the preface + needToSendSettingsAck bool + unackedSettings int // how many SETTINGS have we sent without ACKs? + clientMaxStreams uint32 // SETTINGS_MAX_CONCURRENT_STREAMS from client (our PUSH_PROMISE limit) + advMaxStreams uint32 // our SETTINGS_MAX_CONCURRENT_STREAMS advertised the client + curClientStreams uint32 // number of open streams initiated by the client + curPushedStreams uint32 // number of open streams initiated by server push + maxClientStreamID uint32 // max ever seen from client (odd), or 0 if there have been no client requests + maxPushPromiseID uint32 // ID of the last push promise (even), or 0 if there have been no pushes + streams map[uint32]*stream + initialStreamSendWindowSize int32 + maxFrameSize int32 + headerTableSize uint32 + peerMaxHeaderListSize uint32 // zero means unknown (default) + canonHeader map[string]string // http2-lower-case -> Go-Canonical-Case + writingFrame bool // started writing a frame (on serve goroutine or separate) + writingFrameAsync bool // started a frame on its own goroutine but haven't heard back on wroteFrameCh + needsFrameFlush bool // last frame write wasn't a flush + inGoAway bool // we've started to or sent GOAWAY + inFrameScheduleLoop bool // whether we're in the scheduleFrameWrite loop + needToSendGoAway bool // we need to schedule a GOAWAY frame write + goAwayCode ErrCode + shutdownTimer *time.Timer // nil until used + idleTimer *time.Timer // nil if unused // Owned by the writeFrameAsync goroutine: headerWriteBuf bytes.Buffer hpackEncoder *hpack.Encoder + + // Used by startGracefulShutdown. 
+ shutdownOnce sync.Once } func (sc *serverConn) maxHeaderListSize() uint32 { @@ -451,7 +542,7 @@ type stream struct { id uint32 body *pipe // non-nil if expecting DATA frames cw closeWaiter // closed wait stream transitions to closed state - ctx contextContext + ctx context.Context cancelCtx func() // owned by serverConn's serve loop: @@ -463,10 +554,10 @@ type stream struct { numTrailerValues int64 weight uint8 state streamState - resetQueued bool // RST_STREAM queued for write; set by sc.resetStream - gotTrailerHeader bool // HEADER frame for trailers was seen - wroteHeaders bool // whether we wrote headers (not status 100) - reqBuf []byte // if non-nil, body pipe buffer to return later at EOF + resetQueued bool // RST_STREAM queued for write; set by sc.resetStream + gotTrailerHeader bool // HEADER frame for trailers was seen + wroteHeaders bool // whether we wrote headers (not status 100) + writeDeadline *time.Timer // nil if unused trailer http.Header // accumulated trailers reqTrailer http.Header // handler's Request.Trailer @@ -574,7 +665,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { if err == nil { return } - if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) { + if err == io.EOF || err == io.ErrUnexpectedEOF || isClosedConnError(err) || err == errPrefaceTimeout { // Boring, expected errors. sc.vlogf(format, args...) } else { @@ -584,6 +675,7 @@ func (sc *serverConn) condlogf(err error, format string, args ...interface{}) { func (sc *serverConn) canonicalHeader(v string) string { sc.serveG.check() + buildCommonHeaderMapsOnce() cv, ok := commonCanonHeader[v] if ok { return cv @@ -696,48 +788,48 @@ func (sc *serverConn) serve() { {SettingMaxFrameSize, sc.srv.maxReadFrameSize()}, {SettingMaxConcurrentStreams, sc.advMaxStreams}, {SettingMaxHeaderListSize, sc.maxHeaderListSize()}, - - // TODO: more actual settings, notably - // SettingInitialWindowSize, but then we also - // want to bump up the conn window size the - // same amount here right after the settings + {SettingInitialWindowSize, uint32(sc.srv.initialStreamRecvWindowSize())}, }, }) sc.unackedSettings++ + // Each connection starts with intialWindowSize inflow tokens. + // If a higher value is configured, we add more tokens. + if diff := sc.srv.initialConnRecvWindowSize() - initialWindowSize; diff > 0 { + sc.sendWindowUpdate(nil, int(diff)) + } + if err := sc.readPreface(); err != nil { sc.condlogf(err, "http2: server: error reading preface from client %v: %v", sc.conn.RemoteAddr(), err) return } // Now that we've got the preface, get us out of the - // "StateNew" state. We can't go directly to idle, though. + // "StateNew" state. We can't go directly to idle, though. // Active means we read some data and anticipate a request. We'll // do another Active when we get a HEADERS frame. 
sc.setConnState(http.StateActive) sc.setConnState(http.StateIdle) if sc.srv.IdleTimeout != 0 { - sc.idleTimer = time.NewTimer(sc.srv.IdleTimeout) + sc.idleTimer = time.AfterFunc(sc.srv.IdleTimeout, sc.onIdleTimer) defer sc.idleTimer.Stop() - sc.idleTimerCh = sc.idleTimer.C - } - - var gracefulShutdownCh <-chan struct{} - if sc.hs != nil { - gracefulShutdownCh = h1ServerShutdownChan(sc.hs) } go sc.readFrames() // closed by defer sc.conn.Close above - settingsTimer := time.NewTimer(firstSettingsTimeout) + settingsTimer := time.AfterFunc(firstSettingsTimeout, sc.onSettingsTimer) + defer settingsTimer.Stop() + loopNum := 0 for { loopNum++ select { case wr := <-sc.wantWriteFrameCh: + if se, ok := wr.write.(StreamError); ok { + sc.resetStream(se) + break + } sc.writeFrame(wr) - case spr := <-sc.wantStartPushCh: - sc.startPush(spr) case res := <-sc.wroteFrameCh: sc.wroteFrame(res) case res := <-sc.readFrameCh: @@ -745,36 +837,85 @@ func (sc *serverConn) serve() { return } res.readMore() - if settingsTimer.C != nil { + if settingsTimer != nil { settingsTimer.Stop() - settingsTimer.C = nil + settingsTimer = nil } case m := <-sc.bodyReadCh: sc.noteBodyRead(m.st, m.n) - case <-settingsTimer.C: - sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) - return - case <-gracefulShutdownCh: - gracefulShutdownCh = nil - sc.startGracefulShutdown() - case <-sc.shutdownTimerCh: - sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) - return - case <-sc.idleTimerCh: - sc.vlogf("connection is idle") - sc.goAway(ErrCodeNo) - case fn := <-sc.testHookCh: - fn(loopNum) + case msg := <-sc.serveMsgCh: + switch v := msg.(type) { + case func(int): + v(loopNum) // for testing + case *serverMessage: + switch v { + case settingsTimerMsg: + sc.logf("timeout waiting for SETTINGS frames from %v", sc.conn.RemoteAddr()) + return + case idleTimerMsg: + sc.vlogf("connection is idle") + sc.goAway(ErrCodeNo) + case shutdownTimerMsg: + sc.vlogf("GOAWAY close timer fired; closing conn from %v", sc.conn.RemoteAddr()) + return + case gracefulShutdownMsg: + sc.startGracefulShutdownInternal() + default: + panic("unknown timer") + } + case *startPushRequest: + sc.startPush(v) + default: + panic(fmt.Sprintf("unexpected type %T", v)) + } } - if sc.inGoAway && sc.curOpenStreams() == 0 && !sc.needToSendGoAway && !sc.writingFrame { - return + // Start the shutdown timer after sending a GOAWAY. When sending GOAWAY + // with no error code (graceful shutdown), don't start the timer until + // all open streams have been completed. + sentGoAway := sc.inGoAway && !sc.needToSendGoAway && !sc.writingFrame + gracefulShutdownComplete := sc.goAwayCode == ErrCodeNo && sc.curOpenStreams() == 0 + if sentGoAway && sc.shutdownTimer == nil && (sc.goAwayCode != ErrCodeNo || gracefulShutdownComplete) { + sc.shutDownIn(goAwayTimeout) } } } -// readPreface reads the ClientPreface greeting from the peer -// or returns an error on timeout or an invalid greeting. +func (sc *serverConn) awaitGracefulShutdown(sharedCh <-chan struct{}, privateCh chan struct{}) { + select { + case <-sc.doneServing: + case <-sharedCh: + close(privateCh) + } +} + +type serverMessage int + +// Message values sent to serveMsgCh. 
+var ( + settingsTimerMsg = new(serverMessage) + idleTimerMsg = new(serverMessage) + shutdownTimerMsg = new(serverMessage) + gracefulShutdownMsg = new(serverMessage) +) + +func (sc *serverConn) onSettingsTimer() { sc.sendServeMsg(settingsTimerMsg) } +func (sc *serverConn) onIdleTimer() { sc.sendServeMsg(idleTimerMsg) } +func (sc *serverConn) onShutdownTimer() { sc.sendServeMsg(shutdownTimerMsg) } + +func (sc *serverConn) sendServeMsg(msg interface{}) { + sc.serveG.checkNotOn() // NOT + select { + case sc.serveMsgCh <- msg: + case <-sc.doneServing: + } +} + +var errPrefaceTimeout = errors.New("timeout waiting for client preface") + +// readPreface reads the ClientPreface greeting from the peer or +// returns errPrefaceTimeout on timeout, or an error if the greeting +// is invalid. func (sc *serverConn) readPreface() error { errc := make(chan error, 1) go func() { @@ -792,7 +933,7 @@ func (sc *serverConn) readPreface() error { defer timer.Stop() select { case <-timer.C: - return errors.New("timeout waiting for client preface") + return errPrefaceTimeout case err := <-errc: if err == nil { if VerboseLogs { @@ -981,7 +1122,7 @@ func (sc *serverConn) startFrameWrite(wr FrameWriteRequest) { // errHandlerPanicked is the error given to any callers blocked in a read from // Request.Body when the main goroutine panics. Since most handlers read in the -// the main ServeHTTP goroutine, this will show up rarely. +// main ServeHTTP goroutine, this will show up rarely. var errHandlerPanicked = errors.New("http2: handler panicked") // wroteFrame is called on the serve goroutine with the result of @@ -1014,7 +1155,11 @@ func (sc *serverConn) wroteFrame(res frameWriteResult) { // stateClosed after the RST_STREAM frame is // written. st.state = stateHalfClosedLocal - sc.resetStream(streamError(st.id, ErrCodeCancel)) + // Section 8.1: a server MAY request that the client abort + // transmission of a request without error by sending a + // RST_STREAM with an error code of NO_ERROR after sending + // a complete response. + sc.resetStream(streamError(st.id, ErrCodeNo)) case stateHalfClosedRemote: sc.closeStream(st, errHandlerComplete) } @@ -1086,33 +1231,43 @@ func (sc *serverConn) scheduleFrameWrite() { sc.inFrameScheduleLoop = false } -// startGracefulShutdown sends a GOAWAY with ErrCodeNo to tell the -// client we're gracefully shutting down. The connection isn't closed -// until all current streams are done. +// startGracefulShutdown gracefully shuts down a connection. This +// sends GOAWAY with ErrCodeNo to tell the client we're gracefully +// shutting down. The connection isn't closed until all current +// streams are done. +// +// startGracefulShutdown returns immediately; it does not wait until +// the connection has shut down. func (sc *serverConn) startGracefulShutdown() { - sc.goAwayIn(ErrCodeNo, 0) + sc.serveG.checkNotOn() // NOT + sc.shutdownOnce.Do(func() { sc.sendServeMsg(gracefulShutdownMsg) }) } -func (sc *serverConn) goAway(code ErrCode) { - sc.serveG.check() - var forceCloseIn time.Duration - if code != ErrCodeNo { - forceCloseIn = 250 * time.Millisecond - } else { - // TODO: configurable - forceCloseIn = 1 * time.Second - } - sc.goAwayIn(code, forceCloseIn) +// After sending GOAWAY, the connection will close after goAwayTimeout. +// If we close the connection immediately after sending GOAWAY, there may +// be unsent data in our kernel receive buffer, which will cause the kernel +// to send a TCP RST on close() instead of a FIN. 
This RST will abort the +// connection immediately, whether or not the client had received the GOAWAY. +// +// Ideally we should delay for at least 1 RTT + epsilon so the client has +// a chance to read the GOAWAY and stop sending messages. Measuring RTT +// is hard, so we approximate with 1 second. See golang.org/issue/18701. +// +// This is a var so it can be shorter in tests, where all requests uses the +// loopback interface making the expected RTT very small. +// +// TODO: configurable? +var goAwayTimeout = 1 * time.Second + +func (sc *serverConn) startGracefulShutdownInternal() { + sc.goAway(ErrCodeNo) } -func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { +func (sc *serverConn) goAway(code ErrCode) { sc.serveG.check() if sc.inGoAway { return } - if forceCloseIn != 0 { - sc.shutDownIn(forceCloseIn) - } sc.inGoAway = true sc.needToSendGoAway = true sc.goAwayCode = code @@ -1121,8 +1276,7 @@ func (sc *serverConn) goAwayIn(code ErrCode, forceCloseIn time.Duration) { func (sc *serverConn) shutDownIn(d time.Duration) { sc.serveG.check() - sc.shutdownTimer = time.NewTimer(d) - sc.shutdownTimerCh = sc.shutdownTimer.C + sc.shutdownTimer = time.AfterFunc(d, sc.onShutdownTimer) } func (sc *serverConn) resetStream(se StreamError) { @@ -1305,6 +1459,9 @@ func (sc *serverConn) closeStream(st *stream, err error) { panic(fmt.Sprintf("invariant; can't close stream in state %v", st.state)) } st.state = stateClosed + if st.writeDeadline != nil { + st.writeDeadline.Stop() + } if st.isPushed() { sc.curPushedStreams-- } else { @@ -1317,7 +1474,7 @@ func (sc *serverConn) closeStream(st *stream, err error) { sc.idleTimer.Reset(sc.srv.IdleTimeout) } if h1ServerKeepAlivesDisabled(sc.hs) { - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() } } if p := st.body; p != nil { @@ -1343,6 +1500,12 @@ func (sc *serverConn) processSettings(f *SettingsFrame) error { } return nil } + if f.NumSettings() > 100 || f.HasDuplicates() { + // This isn't actually in the spec, but hang up on + // suspiciously large settings frames or those with + // duplicate entries. + return ConnectionError(ErrCodeProtocol) + } if err := f.ForeachSetting(sc.processSetting); err != nil { return err } @@ -1395,9 +1558,9 @@ func (sc *serverConn) processSettingInitialWindowSize(val uint32) error { // adjust the size of all stream flow control windows that it // maintains by the difference between the new value and the // old value." - old := sc.initialWindowSize - sc.initialWindowSize = int32(val) - growth := sc.initialWindowSize - old // may be negative + old := sc.initialStreamSendWindowSize + sc.initialStreamSendWindowSize = int32(val) + growth := int32(val) - old // may be negative for _, st := range sc.streams { if !st.flow.add(growth) { // 6.9.2 Initial Flow Control Window Size @@ -1431,6 +1594,12 @@ func (sc *serverConn) processData(f *DataFrame) error { // type PROTOCOL_ERROR." return ConnectionError(ErrCodeProtocol) } + // RFC 7540, sec 6.1: If a DATA frame is received whose stream is not in + // "open" or "half-closed (local)" state, the recipient MUST respond with a + // stream error (Section 5.4.2) of type STREAM_CLOSED. 
+ if state == stateClosed { + return streamError(id, ErrCodeStreamClosed) + } if st == nil || state != stateOpen || st.gotTrailerHeader || st.resetQueued { // This includes sending a RST_STREAM if the stream is // in stateHalfClosedLocal (which currently means that @@ -1464,7 +1633,10 @@ func (sc *serverConn) processData(f *DataFrame) error { // Sender sending more than they'd declared? if st.declBodyBytes != -1 && st.bodyBytes+int64(len(data)) > st.declBodyBytes { st.body.CloseWithError(fmt.Errorf("sender tried to send more than declared Content-Length of %d bytes", st.declBodyBytes)) - return streamError(id, ErrCodeStreamClosed) + // RFC 7540, sec 8.1.2.6: A request or response is also malformed if the + // value of a content-length header field does not equal the sum of the + // DATA frame payload lengths that form the body. + return streamError(id, ErrCodeProtocol) } if f.Length > 0 { // Check whether the client has flow control quota. @@ -1504,7 +1676,7 @@ func (sc *serverConn) processGoAway(f *GoAwayFrame) error { } else { sc.vlogf("http2: received GOAWAY %+v, starting graceful shutdown", f) } - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() // http://tools.ietf.org/html/rfc7540#section-6.8 // We should not create any new streams, which means we should disable push. sc.pushEnabled = false @@ -1543,6 +1715,12 @@ func (st *stream) copyTrailersToHandlerRequest() { } } +// onWriteTimeout is run on its own goroutine (from time.AfterFunc) +// when the stream's WriteTimeout has fired. +func (st *stream) onWriteTimeout() { + st.sc.writeFrameFromHandler(FrameWriteRequest{write: streamError(st.id, ErrCodeInternal)}) +} + func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { sc.serveG.check() id := f.StreamID @@ -1568,6 +1746,13 @@ func (sc *serverConn) processHeaders(f *MetaHeadersFrame) error { // processing this frame. return nil } + // RFC 7540, sec 5.1: If an endpoint receives additional frames, other than + // WINDOW_UPDATE, PRIORITY, or RST_STREAM, for a stream that is in + // this state, it MUST respond with a stream error (Section 5.4.2) of + // type STREAM_CLOSED. + if st.state == stateHalfClosedRemote { + return streamError(id, ErrCodeStreamClosed) + } return st.processTrailerHeaders(f) } @@ -1668,7 +1853,7 @@ func (st *stream) processTrailerHeaders(f *MetaHeadersFrame) error { if st.trailer != nil { for _, hf := range f.RegularFields() { key := sc.canonicalHeader(hf.Name) - if !ValidTrailerHeader(key) { + if !httpguts.ValidTrailerHeader(key) { // TODO: send more details to the peer somehow. But http2 has // no way to send debug data at a stream level. Discuss with // HTTP folk. 
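onWriteTimeout above runs on its own time.AfterFunc goroutine and queues a RST_STREAM with ErrCodeInternal when a stream's write deadline fires; newStream, later in this diff, arms that timer from the http.Server's WriteTimeout, and closeStream now stops it. A self-contained sketch of the same arm/stop discipline (the stream type and names below are illustrative only):

package main

import (
	"fmt"
	"sync"
	"time"
)

// stream mirrors the shape of the change: a per-stream write deadline
// armed with time.AfterFunc and stopped when the stream closes.
type stream struct {
	mu            sync.Mutex
	closed        bool
	writeDeadline *time.Timer
}

func (st *stream) onWriteTimeout() {
	// The real server queues a RST_STREAM with ErrCodeInternal here;
	// this sketch just reports that the deadline fired.
	fmt.Println("write timeout: reset stream")
}

func newStream(writeTimeout time.Duration) *stream {
	st := &stream{}
	if writeTimeout != 0 {
		st.writeDeadline = time.AfterFunc(writeTimeout, st.onWriteTimeout)
	}
	return st
}

func (st *stream) close() {
	st.mu.Lock()
	defer st.mu.Unlock()
	if st.closed {
		return
	}
	st.closed = true
	if st.writeDeadline != nil {
		st.writeDeadline.Stop() // the same Stop call closeStream gained above
	}
}

func main() {
	st := newStream(50 * time.Millisecond)
	time.Sleep(10 * time.Millisecond)
	st.close() // closed before the deadline, so the timer never fires
	time.Sleep(100 * time.Millisecond)
}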
@@ -1709,7 +1894,7 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream panic("internal error: cannot create stream with id 0") } - ctx, cancelCtx := contextWithCancel(sc.baseCtx) + ctx, cancelCtx := context.WithCancel(sc.baseCtx) st := &stream{ sc: sc, id: id, @@ -1719,9 +1904,12 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream } st.cw.Init() st.flow.conn = &sc.flow // link to conn-level counter - st.flow.add(sc.initialWindowSize) - st.inflow.conn = &sc.inflow // link to conn-level counter - st.inflow.add(initialWindowSize) // TODO: update this when we send a higher initial window size in the initial settings + st.flow.add(sc.initialStreamSendWindowSize) + st.inflow.conn = &sc.inflow // link to conn-level counter + st.inflow.add(sc.srv.initialStreamRecvWindowSize()) + if sc.hs.WriteTimeout != 0 { + st.writeDeadline = time.AfterFunc(sc.hs.WriteTimeout, st.onWriteTimeout) + } sc.streams[id] = st sc.writeSched.OpenStream(st.id, OpenStreamOptions{PusherID: pusherID}) @@ -1785,16 +1973,14 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res return nil, nil, err } if bodyOpen { - st.reqBuf = getRequestBodyBuf() - req.Body.(*requestBody).pipe = &pipe{ - b: &fixedBuffer{buf: st.reqBuf}, - } - if vv, ok := rp.header["Content-Length"]; ok { req.ContentLength, _ = strconv.ParseInt(vv[0], 10, 64) } else { req.ContentLength = -1 } + req.Body.(*requestBody).pipe = &pipe{ + b: &dataBuffer{expected: req.ContentLength}, + } } return rw, req, nil } @@ -1874,7 +2060,7 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r Body: body, Trailer: trailer, } - req = requestWithContext(req, st.ctx) + req = req.WithContext(st.ctx) rws := responseWriterStatePool.Get().(*responseWriterState) bwSave := rws.bw @@ -1890,24 +2076,6 @@ func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*r return rw, req, nil } -var reqBodyCache = make(chan []byte, 8) - -func getRequestBodyBuf() []byte { - select { - case b := <-reqBodyCache: - return b - default: - return make([]byte, initialWindowSize) - } -} - -func putRequestBodyBuf(b []byte) { - select { - case reqBodyCache <- b: - default: - } -} - // Run on its own goroutine. func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler func(http.ResponseWriter, *http.Request)) { didPanic := true @@ -1920,7 +2088,7 @@ func (sc *serverConn) runHandler(rw *responseWriter, req *http.Request, handler stream: rw.rws.stream, }) // Same as net/http: - if shouldLogPanic(e) { + if e != nil && e != http.ErrAbortHandler { const size = 64 << 10 buf := make([]byte, size) buf = buf[:runtime.Stack(buf, false)] @@ -2003,12 +2171,6 @@ func (sc *serverConn) noteBodyReadFromHandler(st *stream, n int, err error) { case <-sc.doneServing: } } - if err == io.EOF { - if buf := st.reqBuf; buf != nil { - st.reqBuf = nil // shouldn't matter; field unused by other - putRequestBodyBuf(buf) - } - } } func (sc *serverConn) noteBodyRead(st *stream, n int) { @@ -2103,8 +2265,8 @@ func (b *requestBody) Read(p []byte) (n int, err error) { return } -// responseWriter is the http.ResponseWriter implementation. It's -// intentionally small (1 pointer wide) to minimize garbage. The +// responseWriter is the http.ResponseWriter implementation. It's +// intentionally small (1 pointer wide) to minimize garbage. 
The // responseWriterState pointer inside is zeroed at the end of a // request (in handlerDone) and calls on the responseWriter thereafter // simply crash (caller's mistake), but the much larger responseWriterState @@ -2138,6 +2300,7 @@ type responseWriterState struct { wroteHeader bool // WriteHeader called (explicitly or implicitly). Not necessarily sent to user yet. sentHeader bool // have we sent the header frame? handlerDone bool // handler has finished + dirty bool // a Write failed; don't reuse this responseWriterState sentContentLen int64 // non-zero if handler set a Content-Length header wroteBytes int64 @@ -2157,8 +2320,8 @@ func (rws *responseWriterState) hasTrailers() bool { return len(rws.trailers) != // written in the trailers at the end of the response. func (rws *responseWriterState) declareTrailer(k string) { k = http.CanonicalHeaderKey(k) - if !ValidTrailerHeader(k) { - // Forbidden by RFC 2616 14.40. + if !httpguts.ValidTrailerHeader(k) { + // Forbidden by RFC 7230, section 4.1.2. rws.conn.logf("ignoring invalid trailer %q", k) return } @@ -2195,7 +2358,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { clen = strconv.Itoa(len(p)) } _, hasContentType := rws.snapHeader["Content-Type"] - if !hasContentType && bodyAllowedForStatus(rws.status) { + if !hasContentType && bodyAllowedForStatus(rws.status) && len(p) > 0 { ctype = http.DetectContentType(p) } var date string @@ -2208,6 +2371,19 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { foreachHeaderElement(v, rws.declareTrailer) } + // "Connection" headers aren't allowed in HTTP/2 (RFC 7540, 8.1.2.2), + // but respect "Connection" == "close" to mean sending a GOAWAY and tearing + // down the TCP connection when idle, like we do for HTTP/1. + // TODO: remove more Connection-specific header fields here, in addition + // to "Connection". + if _, ok := rws.snapHeader["Connection"]; ok { + v := rws.snapHeader.Get("Connection") + delete(rws.snapHeader, "Connection") + if v == "close" { + rws.conn.startGracefulShutdown() + } + } + endStream := (rws.handlerDone && !rws.hasTrailers() && len(p) == 0) || isHeadResp err = rws.conn.writeHeaders(rws.stream, &writeResHeaders{ streamID: rws.stream.id, @@ -2219,6 +2395,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { date: date, }) if err != nil { + rws.dirty = true return 0, err } if endStream { @@ -2240,6 +2417,7 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { if len(p) > 0 || endStream { // only send a 0 byte DATA frame if we're ending the stream. if err := rws.conn.writeDataFromHandler(rws.stream, p, endStream); err != nil { + rws.dirty = true return 0, err } } @@ -2251,6 +2429,9 @@ func (rws *responseWriterState) writeChunk(p []byte) (n int, err error) { trailers: rws.trailers, endStream: true, }) + if err != nil { + rws.dirty = true + } return len(p), err } return len(p), nil @@ -2274,11 +2455,11 @@ const TrailerPrefix = "Trailer:" // after the header has already been flushed. 
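writeChunk above also starts honoring a handler that sets "Connection: close": the field is stripped from the HTTP/2 response, since RFC 7540, 8.1.2.2 forbids connection-specific headers, and the server instead begins a graceful GOAWAY shutdown once current streams complete. An HTTP/1-era handler like the following therefore keeps its meaning over h2 (handler and certificate paths are placeholders):

package main

import (
	"fmt"
	"net/http"
)

// closeAfterReply behaves the same over HTTP/1 and HTTP/2: over h2 the
// "Connection: close" header never reaches the wire, and the connection
// is shut down gracefully via GOAWAY instead.
func closeAfterReply(w http.ResponseWriter, r *http.Request) {
	w.Header().Set("Connection", "close")
	fmt.Fprintln(w, "goodbye")
}

func main() {
	http.HandleFunc("/bye", closeAfterReply)
	// A TLS server negotiates h2 via ALPN when the standard library's
	// bundled http2 support is enabled (the default). Certificate paths
	// here are placeholders.
	_ = http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}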
Because the Go // ResponseWriter interface has no way to set Trailers (only the // Header), and because we didn't want to expand the ResponseWriter -// interface, and because nobody used trailers, and because RFC 2616 +// interface, and because nobody used trailers, and because RFC 7230 // says you SHOULD (but not must) predeclare any trailers in the // header, the official ResponseWriter rules said trailers in Go must // be predeclared, and then we reuse the same ResponseWriter.Header() -// map to mean both Headers and Trailers. When it's time to write the +// map to mean both Headers and Trailers. When it's time to write the // Trailers, we pick out the fields of Headers that were declared as // trailers. That worked for a while, until we found the first major // user of Trailers in the wild: gRPC (using them only over http2), @@ -2358,6 +2539,24 @@ func (w *responseWriter) Header() http.Header { return rws.handlerHeader } +// checkWriteHeaderCode is a copy of net/http's checkWriteHeaderCode. +func checkWriteHeaderCode(code int) { + // Issue 22880: require valid WriteHeader status codes. + // For now we only enforce that it's three digits. + // In the future we might block things over 599 (600 and above aren't defined + // at http://httpwg.org/specs/rfc7231.html#status.codes) + // and we might block under 200 (once we have more mature 1xx support). + // But for now any three digits. + // + // We used to send "HTTP/1.1 000 0" on the wire in responses but there's + // no equivalent bogus thing we can realistically send in HTTP/2, + // so we'll consistently panic instead and help people find their bugs + // early. (We can't return an error from WriteHeader even if we wanted to.) + if code < 100 || code > 999 { + panic(fmt.Sprintf("invalid WriteHeader code %v", code)) + } +} + func (w *responseWriter) WriteHeader(code int) { rws := w.rws if rws == nil { @@ -2368,6 +2567,7 @@ func (w *responseWriter) WriteHeader(code int) { func (rws *responseWriterState) writeHeader(code int) { if !rws.wroteHeader { + checkWriteHeaderCode(code) rws.wroteHeader = true rws.status = code if len(rws.handlerHeader) > 0 { @@ -2390,7 +2590,7 @@ func cloneHeader(h http.Header) http.Header { // // * Handler calls w.Write or w.WriteString -> // * -> rws.bw (*bufio.Writer) -> -// * (Handler migth call Flush) +// * (Handler might call Flush) // * -> chunkWriter{rws} // * -> responseWriterState.writeChunk(p []byte) // * -> responseWriterState.writeChunk (most of the magic; see comment there) @@ -2429,10 +2629,19 @@ func (w *responseWriter) write(lenData int, dataB []byte, dataS string) (n int, func (w *responseWriter) handlerDone() { rws := w.rws + dirty := rws.dirty rws.handlerDone = true w.Flush() w.rws = nil - responseWriterStatePool.Put(rws) + if !dirty { + // Only recycle the pool if all prior Write calls to + // the serverConn goroutine completed successfully. If + // they returned earlier due to resets from the peer + // there might still be write goroutines outstanding + // from the serverConn referencing the rws memory. See + // issue 20704. + responseWriterStatePool.Put(rws) + } } // Push errors. @@ -2441,14 +2650,9 @@ var ( ErrPushLimitReached = errors.New("http2: push would exceed peer's SETTINGS_MAX_CONCURRENT_STREAMS") ) -// pushOptions is the internal version of http.PushOptions, which we -// cannot include here because it's only defined in Go 1.8 and later. 
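checkWriteHeaderCode above makes the HTTP/2 ResponseWriter panic on WriteHeader codes outside 100..999, matching net/http (issue 22880), because there is no sensible way to serialize a bogus status over h2. An illustrative copy of the check, exercised on a few codes:

package main

import "fmt"

// checkWriteHeaderCode is an illustrative copy of the three-digit check
// added above; it panics for anything that is not a three-digit code.
func checkWriteHeaderCode(code int) {
	if code < 100 || code > 999 {
		panic(fmt.Sprintf("invalid WriteHeader code %v", code))
	}
}

func main() {
	for _, code := range []int{200, 304, 599, 42, 1000} {
		func() {
			defer func() {
				if err := recover(); err != nil {
					fmt.Println("rejected:", err)
				}
			}()
			checkWriteHeaderCode(code)
			fmt.Println("accepted:", code)
		}()
	}
}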
-type pushOptions struct { - Method string - Header http.Header -} +var _ http.Pusher = (*responseWriter)(nil) -func (w *responseWriter) push(target string, opts pushOptions) error { +func (w *responseWriter) Push(target string, opts *http.PushOptions) error { st := w.rws.stream sc := st.sc sc.serveG.checkNotOn() @@ -2459,6 +2663,10 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return ErrRecursivePush } + if opts == nil { + opts = new(http.PushOptions) + } + // Default options. if opts.Method == "" { opts.Method = "GET" @@ -2514,7 +2722,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return fmt.Errorf("method %q must be GET or HEAD", opts.Method) } - msg := startPushRequest{ + msg := &startPushRequest{ parent: st, method: opts.Method, url: u, @@ -2527,7 +2735,7 @@ func (w *responseWriter) push(target string, opts pushOptions) error { return errClientDisconnected case <-st.cw: return errStreamClosed - case sc.wantStartPushCh <- msg: + case sc.serveMsgCh <- msg: } select { @@ -2549,7 +2757,7 @@ type startPushRequest struct { done chan error } -func (sc *serverConn) startPush(msg startPushRequest) { +func (sc *serverConn) startPush(msg *startPushRequest) { sc.serveG.check() // http://tools.ietf.org/html/rfc7540#section-6.6. @@ -2588,7 +2796,7 @@ func (sc *serverConn) startPush(msg startPushRequest) { // A server that is unable to establish a new stream identifier can send a GOAWAY // frame so that the client is forced to open a new connection for new streams. if sc.maxPushPromiseID+2 >= 1<<31 { - sc.startGracefulShutdown() + sc.startGracefulShutdownInternal() return 0, ErrPushLimitReached } sc.maxPushPromiseID += 2 @@ -2630,7 +2838,7 @@ func (sc *serverConn) startPush(msg startPushRequest) { } // foreachHeaderElement splits v according to the "#rule" construction -// in RFC 2616 section 2.1 and calls fn for each non-empty element. +// in RFC 7230 section 7 and calls fn for each non-empty element. func foreachHeaderElement(v string, fn func(string)) { v = textproto.TrimString(v) if v == "" { @@ -2678,66 +2886,6 @@ func new400Handler(err error) http.HandlerFunc { } } -// ValidTrailerHeader reports whether name is a valid header field name to appear -// in trailers. -// See: http://tools.ietf.org/html/rfc7230#section-4.1.2 -func ValidTrailerHeader(name string) bool { - name = http.CanonicalHeaderKey(name) - if strings.HasPrefix(name, "If-") || badTrailer[name] { - return false - } - return true -} - -var badTrailer = map[string]bool{ - "Authorization": true, - "Cache-Control": true, - "Connection": true, - "Content-Encoding": true, - "Content-Length": true, - "Content-Range": true, - "Content-Type": true, - "Expect": true, - "Host": true, - "Keep-Alive": true, - "Max-Forwards": true, - "Pragma": true, - "Proxy-Authenticate": true, - "Proxy-Authorization": true, - "Proxy-Connection": true, - "Range": true, - "Realm": true, - "Te": true, - "Trailer": true, - "Transfer-Encoding": true, - "Www-Authenticate": true, -} - -// h1ServerShutdownChan returns a channel that will be closed when the -// provided *http.Server wants to shut down. -// -// This is a somewhat hacky way to get at http1 innards. It works -// when the http2 code is bundled into the net/http package in the -// standard library. The alternatives ended up making the cmd/go tool -// depend on http Servers. This is the lightest option for now. -// This is tested via the TestServeShutdown* tests in net/http. 
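With the pushOptions shim removed, responseWriter now implements http.Pusher directly, so a handler initiates server push with an ordinary type assertion. A usage sketch (the pushed path and handler are hypothetical):

package main

import (
	"fmt"
	"net/http"
)

func index(w http.ResponseWriter, r *http.Request) {
	if pusher, ok := w.(http.Pusher); ok {
		// Push is best-effort: as shown above it returns ErrRecursivePush
		// from inside a pushed request and ErrPushLimitReached when the
		// peer's SETTINGS_MAX_CONCURRENT_STREAMS is exhausted.
		err := pusher.Push("/static/app.css", &http.PushOptions{
			Header: http.Header{"Accept-Encoding": r.Header["Accept-Encoding"]},
		})
		if err != nil {
			fmt.Println("push skipped:", err)
		}
	}
	fmt.Fprintln(w, `<link rel="stylesheet" href="/static/app.css">`)
}

func main() {
	http.HandleFunc("/", index)
	// Placeholder certificate paths; server push requires an h2 connection.
	_ = http.ListenAndServeTLS(":8443", "cert.pem", "key.pem", nil)
}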
-func h1ServerShutdownChan(hs *http.Server) <-chan struct{} { - if fn := testh1ServerShutdownChan; fn != nil { - return fn(hs) - } - var x interface{} = hs - type I interface { - getDoneChan() <-chan struct{} - } - if hs, ok := x.(I); ok { - return hs.getDoneChan() - } - return nil -} - -// optional test hook for h1ServerShutdownChan. -var testh1ServerShutdownChan func(hs *http.Server) <-chan struct{} - // h1ServerKeepAlivesDisabled reports whether hs has its keep-alives // disabled. See comments on h1ServerShutdownChan above for why // the code is written this way. diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go index 0c7e859db..f272e8f9f 100644 --- a/vendor/golang.org/x/net/http2/transport.go +++ b/vendor/golang.org/x/net/http2/transport.go @@ -10,6 +10,7 @@ import ( "bufio" "bytes" "compress/gzip" + "context" "crypto/rand" "crypto/tls" "errors" @@ -18,17 +19,20 @@ import ( "io/ioutil" "log" "math" + mathrand "math/rand" "net" "net/http" + "net/http/httptrace" + "net/textproto" "sort" "strconv" "strings" "sync" "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" "golang.org/x/net/idna" - "golang.org/x/net/lex/httplex" ) const ( @@ -86,13 +90,23 @@ type Transport struct { // MaxHeaderListSize is the http2 SETTINGS_MAX_HEADER_LIST_SIZE to // send in the initial settings frame. It is how many bytes - // of response headers are allow. Unlike the http2 spec, zero here + // of response headers are allowed. Unlike the http2 spec, zero here // means to use a default limit (currently 10MB). If you actually // want to advertise an ulimited value to the peer, Transport // interprets the highest possible value here (0xffffffff or 1<<32-1) // to mean no limit. MaxHeaderListSize uint32 + // StrictMaxConcurrentStreams controls whether the server's + // SETTINGS_MAX_CONCURRENT_STREAMS should be respected + // globally. If false, new TCP connections are created to the + // server as needed to keep each under the per-connection + // SETTINGS_MAX_CONCURRENT_STREAMS limit. If true, the + // server's SETTINGS_MAX_CONCURRENT_STREAMS is interpreted as + // a global limit and callers of RoundTrip block when needed, + // waiting for their turn. + StrictMaxConcurrentStreams bool + // t1, if non-nil, is the standard library Transport using // this transport. Its settings are used (but not its // RoundTrip method, etc). @@ -116,16 +130,56 @@ func (t *Transport) disableCompression() bool { return t.DisableCompression || (t.t1 != nil && t.t1.DisableCompression) } -var errTransportVersion = errors.New("http2: ConfigureTransport is only supported starting at Go 1.6") - // ConfigureTransport configures a net/http HTTP/1 Transport to use HTTP/2. -// It requires Go 1.6 or later and returns an error if the net/http package is too old -// or if t1 has already been HTTP/2-enabled. +// It returns an error if t1 has already been HTTP/2-enabled. 
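The Go 1.6 version gate is gone: ConfigureTransport always wires an existing http.Transport for HTTP/2 by registering the h2 ALPN upgrade function and extending NextProtos, as configureTransport below shows. Typical caller-side usage, sketched with illustrative settings:

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"golang.org/x/net/http2"
)

func main() {
	t1 := &http.Transport{
		TLSHandshakeTimeout: 10 * time.Second,
		// Existing HTTP/1 settings (proxies, dialers, TLS config) keep
		// working; ConfigureTransport only layers h2 on top.
	}
	if err := http2.ConfigureTransport(t1); err != nil {
		log.Fatal(err) // e.g. t1 was already HTTP/2-enabled
	}

	client := &http.Client{Transport: t1}
	resp, err := client.Get("https://example.com/")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Proto) // "HTTP/2.0" when the server negotiates h2
}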
func ConfigureTransport(t1 *http.Transport) error { - _, err := configureTransport(t1) // in configure_transport.go (go1.6) or not_go16.go + _, err := configureTransport(t1) return err } +func configureTransport(t1 *http.Transport) (*Transport, error) { + connPool := new(clientConnPool) + t2 := &Transport{ + ConnPool: noDialClientConnPool{connPool}, + t1: t1, + } + connPool.t = t2 + if err := registerHTTPSProtocol(t1, noDialH2RoundTripper{t2}); err != nil { + return nil, err + } + if t1.TLSClientConfig == nil { + t1.TLSClientConfig = new(tls.Config) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "h2") { + t1.TLSClientConfig.NextProtos = append([]string{"h2"}, t1.TLSClientConfig.NextProtos...) + } + if !strSliceContains(t1.TLSClientConfig.NextProtos, "http/1.1") { + t1.TLSClientConfig.NextProtos = append(t1.TLSClientConfig.NextProtos, "http/1.1") + } + upgradeFn := func(authority string, c *tls.Conn) http.RoundTripper { + addr := authorityAddr("https", authority) + if used, err := connPool.addConnIfNeeded(addr, t2, c); err != nil { + go c.Close() + return erringRoundTripper{err} + } else if !used { + // Turns out we don't need this c. + // For example, two goroutines made requests to the same host + // at the same time, both kicking off TCP dials. (since protocol + // was unknown) + go c.Close() + } + return t2 + } + if m := t1.TLSNextProto; len(m) == 0 { + t1.TLSNextProto = map[string]func(string, *tls.Conn) http.RoundTripper{ + "h2": upgradeFn, + } + } else { + m["h2"] = upgradeFn + } + return t2, nil +} + func (t *Transport) connPool() ClientConnPool { t.connPoolOnce.Do(t.initConnPool) return t.connPoolOrDef @@ -158,21 +212,24 @@ type ClientConn struct { cond *sync.Cond // hold mu; broadcast on flow/closed changes flow flow // our conn-level flow control quota (cs.flow is per stream) inflow flow // peer's conn-level flow control + closing bool closed bool wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back goAway *GoAwayFrame // if non-nil, the GoAwayFrame we received goAwayDebug string // goAway frame's debug data, retained as a string streams map[uint32]*clientStream // client-initiated nextStreamID uint32 + pendingRequests int // requests blocked and waiting to be sent because len(streams) == maxConcurrentStreams pings map[[8]byte]chan struct{} // in flight ping data to notification channel bw *bufio.Writer br *bufio.Reader fr *Framer lastActive time.Time // Settings from peer: (also guarded by mu) - maxFrameSize uint32 - maxConcurrentStreams uint32 - initialWindowSize uint32 + maxFrameSize uint32 + maxConcurrentStreams uint32 + peerMaxHeaderListSize uint64 + initialWindowSize uint32 hbuf bytes.Buffer // HPACK encoder writes into this henc *hpack.Encoder @@ -187,7 +244,7 @@ type ClientConn struct { type clientStream struct { cc *ClientConn req *http.Request - trace *clientTrace // or nil + trace *httptrace.ClientTrace // or nil ID uint32 resc chan resAndError bufPipe pipe // buffered pipe with the flow-controlled response payload @@ -208,43 +265,65 @@ type clientStream struct { done chan struct{} // closed when stream remove from cc.streams map; close calls guarded by cc.mu // owned by clientConnReadLoop: - firstByte bool // got the first response byte - pastHeaders bool // got first MetaHeadersFrame (actual headers) - pastTrailers bool // got optional second MetaHeadersFrame (trailers) + firstByte bool // got the first response byte + pastHeaders bool // got first MetaHeadersFrame (actual headers) + pastTrailers bool // got optional second 
MetaHeadersFrame (trailers) + num1xx uint8 // number of 1xx responses seen trailer http.Header // accumulated trailers resTrailer *http.Header // client's Response.Trailer } -// awaitRequestCancel runs in its own goroutine and waits for the user -// to cancel a RoundTrip request, its context to expire, or for the -// request to be done (any way it might be removed from the cc.streams -// map: peer reset, successful completion, TCP connection breakage, -// etc) -func (cs *clientStream) awaitRequestCancel(req *http.Request) { - ctx := reqContext(req) +// awaitRequestCancel waits for the user to cancel a request or for the done +// channel to be signaled. A non-nil error is returned only if the request was +// canceled. +func awaitRequestCancel(req *http.Request, done <-chan struct{}) error { + ctx := req.Context() if req.Cancel == nil && ctx.Done() == nil { - return + return nil } select { case <-req.Cancel: - cs.cancelStream() - cs.bufPipe.CloseWithError(errRequestCanceled) + return errRequestCanceled case <-ctx.Done(): + return ctx.Err() + case <-done: + return nil + } +} + +var got1xxFuncForTests func(int, textproto.MIMEHeader) error + +// get1xxTraceFunc returns the value of request's httptrace.ClientTrace.Got1xxResponse func, +// if any. It returns nil if not set or if the Go version is too old. +func (cs *clientStream) get1xxTraceFunc() func(int, textproto.MIMEHeader) error { + if fn := got1xxFuncForTests; fn != nil { + return fn + } + return traceGot1xxResponseFunc(cs.trace) +} + +// awaitRequestCancel waits for the user to cancel a request, its context to +// expire, or for the request to be done (any way it might be removed from the +// cc.streams map: peer reset, successful completion, TCP connection breakage, +// etc). If the request is canceled, then cs will be canceled and closed. +func (cs *clientStream) awaitRequestCancel(req *http.Request) { + if err := awaitRequestCancel(req, cs.done); err != nil { cs.cancelStream() - cs.bufPipe.CloseWithError(ctx.Err()) - case <-cs.done: + cs.bufPipe.CloseWithError(err) } } func (cs *clientStream) cancelStream() { - cs.cc.mu.Lock() + cc := cs.cc + cc.mu.Lock() didReset := cs.didReset cs.didReset = true - cs.cc.mu.Unlock() + cc.mu.Unlock() if !didReset { - cs.cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) + cc.forgetStreamID(cs.ID) } } @@ -261,6 +340,13 @@ func (cs *clientStream) checkResetOrDone() error { } } +func (cs *clientStream) getStartedWrite() bool { + cc := cs.cc + cc.mu.Lock() + defer cc.mu.Unlock() + return cs.startedWrite +} + func (cs *clientStream) abortRequestBodyWrite(err error) { if err == nil { panic("nil error") @@ -286,7 +372,26 @@ func (sew stickyErrWriter) Write(p []byte) (n int, err error) { return } -var ErrNoCachedConn = errors.New("http2: no cached connection was available") +// noCachedConnError is the concrete type of ErrNoCachedConn, which +// needs to be detected by net/http regardless of whether it's its +// bundled version (in h2_bundle.go with a rewritten type name) or +// from a user's x/net/http2. As such, as it has a unique method name +// (IsHTTP2NoCachedConnError) that net/http sniffs for via func +// isNoCachedConnError. +type noCachedConnError struct{} + +func (noCachedConnError) IsHTTP2NoCachedConnError() {} +func (noCachedConnError) Error() string { return "http2: no cached connection was available" } + +// isNoCachedConnError reports whether err is of type noCachedConnError +// or its equivalent renamed type in net/http2's h2_bundle.go. 
Both types +// may coexist in the same running program. +func isNoCachedConnError(err error) bool { + _, ok := err.(interface{ IsHTTP2NoCachedConnError() }) + return ok +} + +var ErrNoCachedConn error = noCachedConnError{} // RoundTripOpt are options for the Transport.RoundTripOpt method. type RoundTripOpt struct { @@ -329,17 +434,28 @@ func (t *Transport) RoundTripOpt(req *http.Request, opt RoundTripOpt) (*http.Res } addr := authorityAddr(req.URL.Scheme, req.URL.Host) - for { + for retry := 0; ; retry++ { cc, err := t.connPool().GetClientConn(req, addr) if err != nil { t.vlogf("http2: Transport failed to get client conn for %s: %v", addr, err) return nil, err } traceGotConn(req, cc) - res, err := cc.RoundTrip(req) - if err != nil { - if req, err = shouldRetryRequest(req, err); err == nil { - continue + res, gotErrAfterReqBodyWrite, err := cc.roundTrip(req) + if err != nil && retry <= 6 { + if req, err = shouldRetryRequest(req, err, gotErrAfterReqBodyWrite); err == nil { + // After the first retry, do exponential backoff with 10% jitter. + if retry == 0 { + continue + } + backoff := float64(uint(1) << (uint(retry) - 1)) + backoff += backoff * (0.1 * mathrand.Float64()) + select { + case <-time.After(time.Second * time.Duration(backoff)): + continue + case <-req.Context().Done(): + return nil, req.Context().Err() + } } } if err != nil { @@ -360,36 +476,30 @@ func (t *Transport) CloseIdleConnections() { } var ( - errClientConnClosed = errors.New("http2: client conn is closed") - errClientConnUnusable = errors.New("http2: client conn not usable") - - errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") - errClientConnGotGoAwayAfterSomeReqBody = errors.New("http2: Transport received Server's graceful shutdown GOAWAY; some request body already written") + errClientConnClosed = errors.New("http2: client conn is closed") + errClientConnUnusable = errors.New("http2: client conn not usable") + errClientConnGotGoAway = errors.New("http2: Transport received Server's graceful shutdown GOAWAY") ) // shouldRetryRequest is called by RoundTrip when a request fails to get // response headers. It is always called with a non-nil error. // It returns either a request to retry (either the same request, or a // modified clone), or an error if the request can't be replayed. -func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { - switch err { - default: +func shouldRetryRequest(req *http.Request, err error, afterBodyWrite bool) (*http.Request, error) { + if !canRetryError(err) { return nil, err - case errClientConnUnusable, errClientConnGotGoAway: + } + // If the Body is nil (or http.NoBody), it's safe to reuse + // this request and its Body. + if req.Body == nil || req.Body == http.NoBody { return req, nil - case errClientConnGotGoAwayAfterSomeReqBody: - // If the Body is nil (or http.NoBody), it's safe to reuse - // this request and its Body. - if req.Body == nil || reqBodyIsNoBody(req.Body) { - return req, nil - } - // Otherwise we depend on the Request having its GetBody - // func defined. - getBody := reqGetBody(req) // Go 1.8: getBody = req.GetBody - if getBody == nil { - return nil, errors.New("http2: Transport: peer server initiated graceful shutdown after some of Request.Body was written; define Request.GetBody to avoid this error") - } - body, err := getBody() + } + + // If the request body can be reset back to its original + // state via the optional req.GetBody, do that. 
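RoundTripOpt above now retries a retryable failure, waiting 2^(retry-1) seconds plus up to 10% jitter between attempts and skipping the wait before the first retry. A small sketch that reproduces the schedule computed in that loop:

package main

import (
	"fmt"
	mathrand "math/rand"
	"time"
)

// retryBackoff reproduces the delay used above: zero before the first
// retry, then exponential backoff with 10% jitter. As in the loop, the
// jittered value is truncated to whole seconds by time.Duration(backoff).
func retryBackoff(retry int) time.Duration {
	if retry == 0 {
		return 0
	}
	backoff := float64(uint(1) << (uint(retry) - 1))
	backoff += backoff * (0.1 * mathrand.Float64())
	return time.Second * time.Duration(backoff)
}

func main() {
	for retry := 0; retry <= 6; retry++ {
		fmt.Printf("retry %d: wait %v\n", retry, retryBackoff(retry))
	}
}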
+ if req.GetBody != nil { + // TODO: consider a req.Body.Close here? or audit that all caller paths do? + body, err := req.GetBody() if err != nil { return nil, err } @@ -397,6 +507,27 @@ func shouldRetryRequest(req *http.Request, err error) (*http.Request, error) { newReq.Body = body return &newReq, nil } + + // The Request.Body can't reset back to the beginning, but we + // don't seem to have started to read from it yet, so reuse + // the request directly. The "afterBodyWrite" means the + // bodyWrite process has started, which becomes true before + // the first Read. + if !afterBodyWrite { + return req, nil + } + + return nil, fmt.Errorf("http2: Transport: cannot retry err [%v] after Request.Body was written; define Request.GetBody to avoid this error", err) +} + +func canRetryError(err error) bool { + if err == errClientConnUnusable || err == errClientConnGotGoAway { + return true + } + if se, ok := err.(StreamError); ok { + return se.Code == ErrCodeRefusedStream + } + return false } func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, error) { @@ -414,7 +545,7 @@ func (t *Transport) dialClientConn(addr string, singleUse bool) (*ClientConn, er func (t *Transport) newTLSConfig(host string) *tls.Config { cfg := new(tls.Config) if t.TLSClientConfig != nil { - *cfg = *cloneTLSConfig(t.TLSClientConfig) + *cfg = *t.TLSClientConfig.Clone() } if !strSliceContains(cfg.NextProtos, NextProtoTLS) { cfg.NextProtos = append([]string{NextProtoTLS}, cfg.NextProtos...) @@ -465,7 +596,7 @@ func (t *Transport) expectContinueTimeout() time.Duration { if t.t1 == nil { return 0 } - return transportExpectContinueTimeout(t.t1) + return t.t1.ExpectContinueTimeout } func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { @@ -474,17 +605,18 @@ func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) { func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) { cc := &ClientConn{ - t: t, - tconn: c, - readerDone: make(chan struct{}), - nextStreamID: 1, - maxFrameSize: 16 << 10, // spec default - initialWindowSize: 65535, // spec default - maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. - streams: make(map[uint32]*clientStream), - singleUse: singleUse, - wantSettingsAck: true, - pings: make(map[[8]byte]chan struct{}), + t: t, + tconn: c, + readerDone: make(chan struct{}), + nextStreamID: 1, + maxFrameSize: 16 << 10, // spec default + initialWindowSize: 65535, // spec default + maxConcurrentStreams: 1000, // "infinite", per spec. 1000 seems good enough. + peerMaxHeaderListSize: 0xffffffffffffffff, // "infinite", per spec. Use 2^64-1 instead. + streams: make(map[uint32]*clientStream), + singleUse: singleUse, + wantSettingsAck: true, + pings: make(map[[8]byte]chan struct{}), } if d := t.idleConnTimeout(); d != 0 { cc.idleTimeout = d @@ -509,6 +641,10 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro // henc in response to SETTINGS frames? cc.henc = hpack.NewEncoder(&cc.hbuf) + if t.AllowHTTP { + cc.nextStreamID = 3 + } + if cs, ok := c.(connectionStater); ok { state := cs.ConnectionState() cc.tlsState = &state @@ -560,22 +696,54 @@ func (cc *ClientConn) setGoAway(f *GoAwayFrame) { } } +// CanTakeNewRequest reports whether the connection can take a new request, +// meaning it has not been closed or received or sent a GOAWAY. 
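shouldRetryRequest above only replays a request that has a body when req.GetBody is available (or the body is nil/http.NoBody). http.NewRequest fills GetBody in automatically for *bytes.Buffer, *bytes.Reader and *strings.Reader bodies; for other body types the caller can provide it, roughly as below (payload and URL are illustrative):

package main

import (
	"bytes"
	"io"
	"io/ioutil"
	"log"
	"net/http"
)

func main() {
	payload := []byte(`{"hello":"world"}`)

	// A bytes.Reader body gets GetBody for free from http.NewRequest.
	req, err := http.NewRequest("POST", "https://example.com/api", bytes.NewReader(payload))
	if err != nil {
		log.Fatal(err)
	}

	// For a custom body type, set GetBody explicitly so the transport can
	// replay the request after a GOAWAY or a REFUSED_STREAM reset.
	req.GetBody = func() (io.ReadCloser, error) {
		return ioutil.NopCloser(bytes.NewReader(payload)), nil
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}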
func (cc *ClientConn) CanTakeNewRequest() bool { cc.mu.Lock() defer cc.mu.Unlock() return cc.canTakeNewRequestLocked() } -func (cc *ClientConn) canTakeNewRequestLocked() bool { +// clientConnIdleState describes the suitability of a client +// connection to initiate a new RoundTrip request. +type clientConnIdleState struct { + canTakeNewRequest bool + freshConn bool // whether it's unused by any previous request +} + +func (cc *ClientConn) idleState() clientConnIdleState { + cc.mu.Lock() + defer cc.mu.Unlock() + return cc.idleStateLocked() +} + +func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) { if cc.singleUse && cc.nextStreamID > 1 { - return false + return + } + var maxConcurrentOkay bool + if cc.t.StrictMaxConcurrentStreams { + // We'll tell the caller we can take a new request to + // prevent the caller from dialing a new TCP + // connection, but then we'll block later before + // writing it. + maxConcurrentOkay = true + } else { + maxConcurrentOkay = int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) } - return cc.goAway == nil && !cc.closed && - int64(len(cc.streams)+1) < int64(cc.maxConcurrentStreams) && - cc.nextStreamID < math.MaxInt32 + + st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay && + int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 + st.freshConn = cc.nextStreamID == 1 && st.canTakeNewRequest + return +} + +func (cc *ClientConn) canTakeNewRequestLocked() bool { + st := cc.idleStateLocked() + return st.canTakeNewRequest } -// onIdleTimeout is called from a time.AfterFunc goroutine. It will +// onIdleTimeout is called from a time.AfterFunc goroutine. It will // only be called when we're idle, but because we're coming from a new // goroutine, there could be a new request coming in at the same time, // so this simply calls the synchronized closeIfIdle to shut down this @@ -602,6 +770,87 @@ func (cc *ClientConn) closeIfIdle() { cc.tconn.Close() } +var shutdownEnterWaitStateHook = func() {} + +// Shutdown gracefully close the client connection, waiting for running streams to complete. +func (cc *ClientConn) Shutdown(ctx context.Context) error { + if err := cc.sendGoAway(); err != nil { + return err + } + // Wait for all in-flight streams to complete or connection to close + done := make(chan error, 1) + cancelled := false // guarded by cc.mu + go func() { + cc.mu.Lock() + defer cc.mu.Unlock() + for { + if len(cc.streams) == 0 || cc.closed { + cc.closed = true + done <- cc.tconn.Close() + break + } + if cancelled { + break + } + cc.cond.Wait() + } + }() + shutdownEnterWaitStateHook() + select { + case err := <-done: + return err + case <-ctx.Done(): + cc.mu.Lock() + // Free the goroutine above + cancelled = true + cc.cond.Broadcast() + cc.mu.Unlock() + return ctx.Err() + } +} + +func (cc *ClientConn) sendGoAway() error { + cc.mu.Lock() + defer cc.mu.Unlock() + cc.wmu.Lock() + defer cc.wmu.Unlock() + if cc.closing { + // GOAWAY sent already + return nil + } + // Send a graceful shutdown frame to server + maxStreamID := cc.nextStreamID + if err := cc.fr.WriteGoAway(maxStreamID, ErrCodeNo, nil); err != nil { + return err + } + if err := cc.bw.Flush(); err != nil { + return err + } + // Prevent new requests + cc.closing = true + return nil +} + +// Close closes the client connection immediately. +// +// In-flight requests are interrupted. For a graceful shutdown, use Shutdown instead. 
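Shutdown above gives ClientConn a graceful path: it sends GOAWAY, refuses new requests via the new closing flag, and waits for in-flight streams or the caller's context; Close, just below, tears the connection down immediately and fails in-flight requests. A sketch of how a caller holding a ClientConn might combine the two (the dialed address is a placeholder; in practice this would be a TLS connection that negotiated h2):

package main

import (
	"context"
	"log"
	"net"
	"time"

	"golang.org/x/net/http2"
)

func drain(netConn net.Conn) {
	t := &http2.Transport{}
	cc, err := t.NewClientConn(netConn)
	if err != nil {
		log.Fatal(err)
	}

	// ... issue requests with cc.RoundTrip ...

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := cc.Shutdown(ctx); err != nil {
		// Streams were still running when the deadline hit;
		// give up and interrupt them.
		cc.Close()
	}
}

func main() {
	// Placeholder dial target.
	netConn, err := net.Dial("tcp", "example.com:443")
	if err != nil {
		log.Fatal(err)
	}
	drain(netConn)
}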
+func (cc *ClientConn) Close() error { + cc.mu.Lock() + defer cc.cond.Broadcast() + defer cc.mu.Unlock() + err := errors.New("http2: client connection force closed via ClientConn.Close") + for id, cs := range cc.streams { + select { + case cs.resc <- resAndError{err: err}: + default: + } + cs.bufPipe.CloseWithError(err) + delete(cc.streams, id) + } + cc.closed = true + return cc.tconn.Close() +} + const maxAllocFrameSize = 512 << 10 // frameBuffer returns a scratch buffer suitable for writing DATA frames. @@ -684,7 +933,7 @@ func checkConnHeaders(req *http.Request) error { if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") { return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv) } - if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "close" && vv[0] != "keep-alive") { + if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !strings.EqualFold(vv[0], "close") && !strings.EqualFold(vv[0], "keep-alive")) { return fmt.Errorf("http2: invalid Connection request header: %q", vv) } return nil @@ -694,7 +943,7 @@ func checkConnHeaders(req *http.Request) error { // req.ContentLength, where 0 actually means zero (not unknown) and -1 // means unknown. func actualContentLength(req *http.Request) int64 { - if req.Body == nil { + if req.Body == nil || req.Body == http.NoBody { return 0 } if req.ContentLength != 0 { @@ -704,8 +953,13 @@ func actualContentLength(req *http.Request) int64 { } func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { + resp, _, err := cc.roundTrip(req) + return resp, err +} + +func (cc *ClientConn) roundTrip(req *http.Request) (res *http.Response, gotErrAfterReqBodyWrite bool, err error) { if err := checkConnHeaders(req); err != nil { - return nil, err + return nil, false, err } if cc.idleTimer != nil { cc.idleTimer.Stop() @@ -713,20 +967,19 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { trailers, err := commaSeparatedTrailers(req) if err != nil { - return nil, err + return nil, false, err } hasTrailers := trailers != "" cc.mu.Lock() - cc.lastActive = time.Now() - if cc.closed || !cc.canTakeNewRequestLocked() { + if err := cc.awaitOpenSlotForRequest(req); err != nil { cc.mu.Unlock() - return nil, errClientConnUnusable + return nil, false, err } body := req.Body - hasBody := body != nil contentLen := actualContentLength(req) + hasBody := contentLen != 0 // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere? 
var requestedGzip bool @@ -755,19 +1008,19 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { hdrs, err := cc.encodeHeaders(req, requestedGzip, trailers, contentLen) if err != nil { cc.mu.Unlock() - return nil, err + return nil, false, err } cs := cc.newStream() cs.req = req - cs.trace = requestTrace(req) + cs.trace = httptrace.ContextClientTrace(req.Context()) cs.requestedGzip = requestedGzip bodyWriter := cc.t.getBodyWriterState(cs, body) cs.on100 = bodyWriter.on100 cc.wmu.Lock() endStream := !hasBody && !hasTrailers - werr := cc.writeHeaders(cs.ID, endStream, hdrs) + werr := cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs) cc.wmu.Unlock() traceWroteHeaders(cs.trace) cc.mu.Unlock() @@ -781,7 +1034,7 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // Don't bother sending a RST_STREAM (our write already failed; // no need to keep writing) traceWroteRequest(cs.trace, werr) - return nil, werr + return nil, false, werr } var respHeaderTimer <-chan time.Time @@ -798,9 +1051,9 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { readLoopResCh := cs.resc bodyWritten := false - ctx := reqContext(req) + ctx := req.Context() - handleReadLoopResponse := func(re resAndError) (*http.Response, error) { + handleReadLoopResponse := func(re resAndError) (*http.Response, bool, error) { res := re.res if re.err != nil || res.StatusCode > 299 { // On error or status code 3xx, 4xx, 5xx, etc abort any @@ -809,26 +1062,19 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { // 2xx, however, then assume the server DOES potentially // want our body (e.g. full-duplex streaming: // golang.org/issue/13444). If it turns out the server - // doesn't, they'll RST_STREAM us soon enough. This is a - // heuristic to avoid adding knobs to Transport. Hopefully + // doesn't, they'll RST_STREAM us soon enough. This is a + // heuristic to avoid adding knobs to Transport. Hopefully // we can keep it. 
bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWrite) } if re.err != nil { - if re.err == errClientConnGotGoAway { - cc.mu.Lock() - if cs.startedWrite { - re.err = errClientConnGotGoAwayAfterSomeReqBody - } - cc.mu.Unlock() - } cc.forgetStreamID(cs.ID) - return nil, re.err + return nil, cs.getStartedWrite(), re.err } res.Request = req res.TLS = cc.tlsState - return res, nil + return res, false, nil } for { @@ -836,37 +1082,37 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { case re := <-readLoopResCh: return handleReadLoopResponse(re) case <-respHeaderTimer: - cc.forgetStreamID(cs.ID) if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } - return nil, errTimeout - case <-ctx.Done(): cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errTimeout + case <-ctx.Done(): if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } - return nil, ctx.Err() - case <-req.Cancel: cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), ctx.Err() + case <-req.Cancel: if !hasBody || bodyWritten { cc.writeStreamReset(cs.ID, ErrCodeCancel, nil) } else { bodyWriter.cancel() cs.abortRequestBodyWrite(errStopReqBodyWriteAndCancel) } - return nil, errRequestCanceled + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), errRequestCanceled case <-cs.peerReset: // processResetStream already removed the // stream from the streams map; no need for // forgetStreamID. - return nil, cs.resetErr + return nil, cs.getStartedWrite(), cs.resetErr case err := <-bodyWriter.resc: // Prefer the read loop's response, if available. Issue 16102. select { @@ -875,7 +1121,8 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { default: } if err != nil { - return nil, err + cc.forgetStreamID(cs.ID) + return nil, cs.getStartedWrite(), err } bodyWritten = true if d := cc.responseHeaderTimeout(); d != 0 { @@ -887,14 +1134,55 @@ func (cc *ClientConn) RoundTrip(req *http.Request) (*http.Response, error) { } } +// awaitOpenSlotForRequest waits until len(streams) < maxConcurrentStreams. +// Must hold cc.mu. +func (cc *ClientConn) awaitOpenSlotForRequest(req *http.Request) error { + var waitingForConn chan struct{} + var waitingForConnErr error // guarded by cc.mu + for { + cc.lastActive = time.Now() + if cc.closed || !cc.canTakeNewRequestLocked() { + if waitingForConn != nil { + close(waitingForConn) + } + return errClientConnUnusable + } + if int64(len(cc.streams))+1 <= int64(cc.maxConcurrentStreams) { + if waitingForConn != nil { + close(waitingForConn) + } + return nil + } + // Unfortunately, we cannot wait on a condition variable and channel at + // the same time, so instead, we spin up a goroutine to check if the + // request is canceled while we wait for a slot to open in the connection. 
+ if waitingForConn == nil { + waitingForConn = make(chan struct{}) + go func() { + if err := awaitRequestCancel(req, waitingForConn); err != nil { + cc.mu.Lock() + waitingForConnErr = err + cc.cond.Broadcast() + cc.mu.Unlock() + } + }() + } + cc.pendingRequests++ + cc.cond.Wait() + cc.pendingRequests-- + if waitingForConnErr != nil { + return waitingForConnErr + } + } +} + // requires cc.wmu be held -func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, hdrs []byte) error { +func (cc *ClientConn) writeHeaders(streamID uint32, endStream bool, maxFrameSize int, hdrs []byte) error { first := true // first frame written (HEADERS is first, then CONTINUATION) - frameSize := int(cc.maxFrameSize) for len(hdrs) > 0 && cc.werr == nil { chunk := hdrs - if len(chunk) > frameSize { - chunk = chunk[:frameSize] + if len(chunk) > maxFrameSize { + chunk = chunk[:maxFrameSize] } hdrs = hdrs[len(chunk):] endHeaders := len(hdrs) == 0 @@ -955,6 +1243,7 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( sawEOF = true err = nil } else if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeCancel, err) return err } @@ -1002,17 +1291,26 @@ func (cs *clientStream) writeRequestBody(body io.Reader, bodyCloser io.Closer) ( var trls []byte if hasTrailers { cc.mu.Lock() - defer cc.mu.Unlock() - trls = cc.encodeTrailers(req) + trls, err = cc.encodeTrailers(req) + cc.mu.Unlock() + if err != nil { + cc.writeStreamReset(cs.ID, ErrCodeInternal, err) + cc.forgetStreamID(cs.ID) + return err + } } + cc.mu.Lock() + maxFrameSize := int(cc.maxFrameSize) + cc.mu.Unlock() + cc.wmu.Lock() defer cc.wmu.Unlock() // Two ways to send END_STREAM: either with trailers, or // with an empty DATA frame. if len(trls) > 0 { - err = cc.writeHeaders(cs.ID, true, trls) + err = cc.writeHeaders(cs.ID, true, maxFrameSize, trls) } else { err = cc.fr.WriteData(cs.ID, true, nil) } @@ -1071,7 +1369,7 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail if host == "" { host = req.URL.Host } - host, err := httplex.PunycodeHostPort(host) + host, err := httpguts.PunycodeHostPort(host) if err != nil { return nil, err } @@ -1096,72 +1394,103 @@ func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trail // potentially pollute our hpack state. (We want to be able to // continue to reuse the hpack encoder for future requests) for k, vv := range req.Header { - if !httplex.ValidHeaderFieldName(k) { + if !httpguts.ValidHeaderFieldName(k) { return nil, fmt.Errorf("invalid HTTP header name %q", k) } for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { return nil, fmt.Errorf("invalid HTTP header value %q for header %q", v, k) } } } - // 8.1.2.3 Request Pseudo-Header Fields - // The :path pseudo-header field includes the path and query parts of the - // target URI (the path-absolute production and optionally a '?' character - // followed by the query production (see Sections 3.3 and 3.4 of - // [RFC3986]). - cc.writeHeader(":authority", host) - cc.writeHeader(":method", req.Method) - if req.Method != "CONNECT" { - cc.writeHeader(":path", path) - cc.writeHeader(":scheme", req.URL.Scheme) - } - if trailers != "" { - cc.writeHeader("trailer", trailers) - } + enumerateHeaders := func(f func(name, value string)) { + // 8.1.2.3 Request Pseudo-Header Fields + // The :path pseudo-header field includes the path and query parts of the + // target URI (the path-absolute production and optionally a '?' 
character + // followed by the query production (see Sections 3.3 and 3.4 of + // [RFC3986]). + f(":authority", host) + f(":method", req.Method) + if req.Method != "CONNECT" { + f(":path", path) + f(":scheme", req.URL.Scheme) + } + if trailers != "" { + f("trailer", trailers) + } - var didUA bool - for k, vv := range req.Header { - lowKey := strings.ToLower(k) - switch lowKey { - case "host", "content-length": - // Host is :authority, already sent. - // Content-Length is automatic, set below. - continue - case "connection", "proxy-connection", "transfer-encoding", "upgrade", "keep-alive": - // Per 8.1.2.2 Connection-Specific Header - // Fields, don't send connection-specific - // fields. We have already checked if any - // are error-worthy so just ignore the rest. - continue - case "user-agent": - // Match Go's http1 behavior: at most one - // User-Agent. If set to nil or empty string, - // then omit it. Otherwise if not mentioned, - // include the default (below). - didUA = true - if len(vv) < 1 { + var didUA bool + for k, vv := range req.Header { + if strings.EqualFold(k, "host") || strings.EqualFold(k, "content-length") { + // Host is :authority, already sent. + // Content-Length is automatic, set below. continue - } - vv = vv[:1] - if vv[0] == "" { + } else if strings.EqualFold(k, "connection") || strings.EqualFold(k, "proxy-connection") || + strings.EqualFold(k, "transfer-encoding") || strings.EqualFold(k, "upgrade") || + strings.EqualFold(k, "keep-alive") { + // Per 8.1.2.2 Connection-Specific Header + // Fields, don't send connection-specific + // fields. We have already checked if any + // are error-worthy so just ignore the rest. continue + } else if strings.EqualFold(k, "user-agent") { + // Match Go's http1 behavior: at most one + // User-Agent. If set to nil or empty string, + // then omit it. Otherwise if not mentioned, + // include the default (below). + didUA = true + if len(vv) < 1 { + continue + } + vv = vv[:1] + if vv[0] == "" { + continue + } + + } + + for _, v := range vv { + f(k, v) } } - for _, v := range vv { - cc.writeHeader(lowKey, v) + if shouldSendReqContentLength(req.Method, contentLength) { + f("content-length", strconv.FormatInt(contentLength, 10)) + } + if addGzipHeader { + f("accept-encoding", "gzip") + } + if !didUA { + f("user-agent", defaultUserAgent) } } - if shouldSendReqContentLength(req.Method, contentLength) { - cc.writeHeader("content-length", strconv.FormatInt(contentLength, 10)) - } - if addGzipHeader { - cc.writeHeader("accept-encoding", "gzip") - } - if !didUA { - cc.writeHeader("user-agent", defaultUserAgent) + + // Do a first pass over the headers counting bytes to ensure + // we don't exceed cc.peerMaxHeaderListSize. This is done as a + // separate pass before encoding the headers to prevent + // modifying the hpack state. + hlSize := uint64(0) + enumerateHeaders(func(name, value string) { + hf := hpack.HeaderField{Name: name, Value: value} + hlSize += uint64(hf.Size()) + }) + + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize } + + trace := httptrace.ContextClientTrace(req.Context()) + traceHeaders := traceHasWroteHeaderField(trace) + + // Header list size is ok. Write the headers. 
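encodeHeaders now walks the headers twice through the enumerateHeaders closure: the first pass, above, only sums field sizes against the peer's SETTINGS_MAX_HEADER_LIST_SIZE so the shared hpack encoder state is never touched for an oversized request, and the second pass below does the actual encoding. The accounting is the RFC 7540 section 6.5.2 rule of name length plus value length plus 32 octets per field; a small sketch of that sum (the sample header set is arbitrary):

package main

import (
	"fmt"

	"golang.org/x/net/http2/hpack"
)

// headerListSize applies the same accounting encodeHeaders checks against
// peerMaxHeaderListSize: len(name) + len(value) + 32 octets per field.
func headerListSize(fields map[string]string) uint64 {
	var size uint64
	for name, value := range fields {
		hf := hpack.HeaderField{Name: name, Value: value}
		size += uint64(hf.Size())
	}
	return size
}

func main() {
	fields := map[string]string{
		":method":    "GET",
		":path":      "/",
		":scheme":    "https",
		":authority": "example.com",
		"user-agent": "Go-http-client/2.0",
	}
	fmt.Println("header list size:", headerListSize(fields), "octets")
}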
+ enumerateHeaders(func(name, value string) { + name = strings.ToLower(name) + cc.writeHeader(name, value) + if traceHeaders { + traceWroteHeaderField(trace, name, value) + } + }) + return cc.hbuf.Bytes(), nil } @@ -1188,17 +1517,29 @@ func shouldSendReqContentLength(method string, contentLength int64) bool { } // requires cc.mu be held. -func (cc *ClientConn) encodeTrailers(req *http.Request) []byte { +func (cc *ClientConn) encodeTrailers(req *http.Request) ([]byte, error) { cc.hbuf.Reset() + + hlSize := uint64(0) + for k, vv := range req.Trailer { + for _, v := range vv { + hf := hpack.HeaderField{Name: k, Value: v} + hlSize += uint64(hf.Size()) + } + } + if hlSize > cc.peerMaxHeaderListSize { + return nil, errRequestHeaderListSize + } + for k, vv := range req.Trailer { - // Transfer-Encoding, etc.. have already been filter at the + // Transfer-Encoding, etc.. have already been filtered at the // start of RoundTrip lowKey := strings.ToLower(k) for _, v := range vv { cc.writeHeader(lowKey, v) } } - return cc.hbuf.Bytes() + return cc.hbuf.Bytes(), nil } func (cc *ClientConn) writeHeader(name, value string) { @@ -1246,7 +1587,9 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { cc.idleTimer.Reset(cc.idleTimeout) } close(cs.done) - cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl + // Wake up checkResetOrDone via clientStream.awaitFlowControl and + // wake up RoundTrip if there is a pending request. + cc.cond.Broadcast() } return cs } @@ -1254,17 +1597,12 @@ func (cc *ClientConn) streamByID(id uint32, andRemove bool) *clientStream { // clientConnReadLoop is the state owned by the clientConn's frame-reading readLoop. type clientConnReadLoop struct { cc *ClientConn - activeRes map[uint32]*clientStream // keyed by streamID closeWhenIdle bool } // readLoop runs in its own goroutine and reads and dispatches frames. func (cc *ClientConn) readLoop() { - rl := &clientConnReadLoop{ - cc: cc, - activeRes: make(map[uint32]*clientStream), - } - + rl := &clientConnReadLoop{cc: cc} defer rl.cleanup() cc.readerErr = rl.run() if ce, ok := cc.readerErr.(ConnectionError); ok { @@ -1319,10 +1657,8 @@ func (rl *clientConnReadLoop) cleanup() { } else if err == io.EOF { err = io.ErrUnexpectedEOF } - for _, cs := range rl.activeRes { - cs.bufPipe.CloseWithError(err) - } for _, cs := range cc.streams { + cs.bufPipe.CloseWithError(err) // no-op if already closed select { case cs.resc <- resAndError{err: err}: default: @@ -1345,8 +1681,9 @@ func (rl *clientConnReadLoop) run() error { cc.vlogf("http2: Transport readFrame error on conn %p: (%T) %v", cc, err, err) } if se, ok := err.(StreamError); ok { - if cs := cc.streamByID(se.StreamID, true /*ended; remove it*/); cs != nil { + if cs := cc.streamByID(se.StreamID, false); cs != nil { cs.cc.writeStreamReset(cs.ID, se.Code, err) + cs.cc.forgetStreamID(cs.ID) if se.Cause == nil { se.Cause = cc.fr.errDetail } @@ -1399,7 +1736,7 @@ func (rl *clientConnReadLoop) run() error { } return err } - if rl.closeWhenIdle && gotReply && maybeIdle && len(rl.activeRes) == 0 { + if rl.closeWhenIdle && gotReply && maybeIdle { cc.closeIfIdle() } } @@ -1407,13 +1744,31 @@ func (rl *clientConnReadLoop) run() error { func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { cc := rl.cc - cs := cc.streamByID(f.StreamID, f.StreamEnded()) + cs := cc.streamByID(f.StreamID, false) if cs == nil { // We'd get here if we canceled a request while the // server had its response still in flight. 
So if this // was just something we canceled, ignore it. return nil } + if f.StreamEnded() { + // Issue 20521: If the stream has ended, streamByID() causes + // clientStream.done to be closed, which causes the request's bodyWriter + // to be closed with an errStreamClosed, which may be received by + // clientConn.RoundTrip before the result of processing these headers. + // Deferring stream closure allows the header processing to occur first. + // clientConn.RoundTrip may still receive the bodyWriter error first, but + // the fix for issue 16102 prioritises any response. + // + // Issue 22413: If there is no request body, we should close the + // stream before writing to cs.resc so that the stream is closed + // immediately once RoundTrip returns. + if cs.req.Body != nil { + defer cc.forgetStreamID(f.StreamID) + } else { + cc.forgetStreamID(f.StreamID) + } + } if !cs.firstByte { if cs.trace != nil { // TODO(bradfitz): move first response byte earlier, @@ -1437,6 +1792,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { } // Any other error type is a stream error. cs.cc.writeStreamReset(f.StreamID, ErrCodeProtocol, err) + cc.forgetStreamID(cs.ID) cs.resc <- resAndError{err: err} return nil // return nil from process* funcs to keep conn alive } @@ -1444,9 +1800,6 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // (nil, nil) special case. See handleResponse docs. return nil } - if res.Body != noBody { - rl.activeRes[cs.ID] = cs - } cs.resTrailer = &res.Trailer cs.resc <- resAndError{res: res} return nil @@ -1457,8 +1810,7 @@ func (rl *clientConnReadLoop) processHeaders(f *MetaHeadersFrame) error { // is the detail. // // As a special case, handleResponse may return (nil, nil) to skip the -// frame (currently only used for 100 expect continue). This special -// case is going away after Issue 13851 is fixed. +// frame (currently only used for 1xx responses). 
func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFrame) (*http.Response, error) { if f.Truncated { return nil, errResponseHeaderListSize @@ -1466,20 +1818,11 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra status := f.PseudoValue("status") if status == "" { - return nil, errors.New("missing status pseudo header") + return nil, errors.New("malformed response from server: missing status pseudo header") } statusCode, err := strconv.Atoi(status) if err != nil { - return nil, errors.New("malformed non-numeric status pseudo header") - } - - if statusCode == 100 { - traceGot100Continue(cs.trace) - if cs.on100 != nil { - cs.on100() // forces any write delay timer to fire - } - cs.pastHeaders = false // do it all again - return nil, nil + return nil, errors.New("malformed response from server: malformed non-numeric status pseudo header") } header := make(http.Header) @@ -1506,6 +1849,27 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra } } + if statusCode >= 100 && statusCode <= 199 { + cs.num1xx++ + const max1xxResponses = 5 // arbitrary bound on number of informational responses, same as net/http + if cs.num1xx > max1xxResponses { + return nil, errors.New("http2: too many 1xx informational responses") + } + if fn := cs.get1xxTraceFunc(); fn != nil { + if err := fn(statusCode, textproto.MIMEHeader(header)); err != nil { + return nil, err + } + } + if statusCode == 100 { + traceGot100Continue(cs.trace) + if cs.on100 != nil { + cs.on100() // forces any write delay timer to fire + } + } + cs.pastHeaders = false // do it all again + return nil, nil + } + streamEnded := f.StreamEnded() isHead := cs.req.Method == "HEAD" if !streamEnded || isHead { @@ -1528,8 +1892,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra return res, nil } - buf := new(bytes.Buffer) // TODO(bradfitz): recycle this garbage - cs.bufPipe = pipe{b: buf} + cs.bufPipe = pipe{b: &dataBuffer{expected: res.ContentLength}} cs.bytesRemain = res.ContentLength res.Body = transportResponseBody{cs} go cs.awaitRequestCancel(cs.req) @@ -1539,7 +1902,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra res.Header.Del("Content-Length") res.ContentLength = -1 res.Body = &gzipReader{body: res.Body} - setResponseUncompressed(res) + res.Uncompressed = true } return res, nil } @@ -1656,6 +2019,7 @@ func (b transportResponseBody) Close() error { cc.wmu.Lock() if !serverSentStreamEnd { cc.fr.WriteRSTStream(cs.ID, ErrCodeCancel) + cs.didReset = true } // Return connection-level flow control. if unread > 0 { @@ -1668,6 +2032,7 @@ func (b transportResponseBody) Close() error { } cs.bufPipe.BreakWithError(errClosedResponseBody) + cc.forgetStreamID(cs.ID) return nil } @@ -1702,13 +2067,23 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } return nil } + if !cs.firstByte { + cc.logf("protocol error: received DATA before a HEADERS frame") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil + } if f.Length > 0 { - if len(data) > 0 && cs.bufPipe.b == nil { - // Data frame after it's already closed? 
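The new 1xx branch above counts informational responses per stream, gives up after five (the same bound net/http uses), and forwards each one to the caller's Got1xxResponse trace hook before resetting pastHeaders and waiting for the next HEADERS frame. Below is a client-side sketch of observing those responses through net/http/httptrace; the URL is a placeholder and the callback body is only illustrative.

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
	"net/textproto"
)

func main() {
	trace := &httptrace.ClientTrace{
		// Got1xxResponse fires once per informational response; returning a
		// non-nil error aborts the request.
		Got1xxResponse: func(code int, header textproto.MIMEHeader) error {
			log.Printf("informational response: %d %v", code, header)
			return nil
		},
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	log.Println("final status:", resp.Status)
}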
- cc.logf("http2: Transport received DATA frame for closed stream; closing connection") - return ConnectionError(ErrCodeProtocol) + if cs.req.Method == "HEAD" && len(data) > 0 { + cc.logf("protocol error: received DATA on a HEAD request") + rl.endStreamError(cs, StreamError{ + StreamID: f.StreamID, + Code: ErrCodeProtocol, + }) + return nil } - // Check connection-level flow control. cc.mu.Lock() if cs.inflow.available() >= int32(f.Length) { @@ -1719,16 +2094,27 @@ func (rl *clientConnReadLoop) processData(f *DataFrame) error { } // Return any padded flow control now, since we won't // refund it later on body reads. - if pad := int32(f.Length) - int32(len(data)); pad > 0 { - cs.inflow.add(pad) - cc.inflow.add(pad) + var refund int + if pad := int(f.Length) - len(data); pad > 0 { + refund += pad + } + // Return len(data) now if the stream is already closed, + // since data will never be read. + didReset := cs.didReset + if didReset { + refund += len(data) + } + if refund > 0 { + cc.inflow.add(int32(refund)) cc.wmu.Lock() - cc.fr.WriteWindowUpdate(0, uint32(pad)) - cc.fr.WriteWindowUpdate(cs.ID, uint32(pad)) + cc.fr.WriteWindowUpdate(0, uint32(refund)) + if !didReset { + cs.inflow.add(int32(refund)) + cc.fr.WriteWindowUpdate(cs.ID, uint32(refund)) + } cc.bw.Flush() cc.wmu.Unlock() } - didReset := cs.didReset cc.mu.Unlock() if len(data) > 0 && !didReset { @@ -1759,11 +2145,10 @@ func (rl *clientConnReadLoop) endStreamError(cs *clientStream, err error) { err = io.EOF code = cs.copyTrailers } - cs.bufPipe.closeWithErrorAndCode(err, code) - delete(rl.activeRes, cs.ID) if isConnectionCloseRequest(cs.req) { rl.closeWhenIdle = true } + cs.bufPipe.closeWithErrorAndCode(err, code) select { case cs.resc <- resAndError{err: err}: @@ -1811,6 +2196,8 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error { cc.maxFrameSize = s.Val case SettingMaxConcurrentStreams: cc.maxConcurrentStreams = s.Val + case SettingMaxHeaderListSize: + cc.peerMaxHeaderListSize = uint64(s.Val) case SettingInitialWindowSize: // Values above the maximum flow-control // window size of 2^31-1 MUST be treated as a @@ -1888,13 +2275,11 @@ func (rl *clientConnReadLoop) processResetStream(f *RSTStreamFrame) error { cs.bufPipe.CloseWithError(err) cs.cc.cond.Broadcast() // wake up checkResetOrDone via clientStream.awaitFlowControl } - delete(rl.activeRes, cs.ID) return nil } // Ping sends a PING frame to the server and waits for the ack. 
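The refund logic above returns any padding to both flow-control windows immediately, and once the stream has been reset locally it also returns the data bytes themselves, crediting only the connection-level window since the stream will never be read. The arithmetic restated as a standalone helper (windowRefund is a hypothetical name, not part of the patch):

package main

import "fmt"

// windowRefund mirrors the patch's accounting: padding is always refunded,
// data bytes are refunded only when the stream was already reset locally,
// and a reset stream receives no stream-level WINDOW_UPDATE at all.
func windowRefund(frameLength uint32, dataLen int, streamWasReset bool) (connRefund, streamRefund int) {
	refund := 0
	if pad := int(frameLength) - dataLen; pad > 0 {
		refund += pad // padding is never handed to the body reader
	}
	if streamWasReset {
		refund += dataLen // the data will never be read either
	}
	connRefund = refund
	if !streamWasReset {
		streamRefund = refund
	}
	return connRefund, streamRefund
}

func main() {
	// A 100-byte DATA frame carrying 90 data bytes on a stream the client
	// has already reset: the connection gets all 100 bytes back, the stream
	// nothing.
	conn, stream := windowRefund(100, 90, true)
	fmt.Println(conn, stream) // 100 0
}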
-// Public implementation is in go17.go and not_go17.go -func (cc *ClientConn) ping(ctx contextContext) error { +func (cc *ClientConn) Ping(ctx context.Context) error { c := make(chan struct{}) // Generate a random payload var p [8]byte @@ -1977,6 +2362,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, err error) var ( errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit") + errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit") errPseudoTrailers = errors.New("http2: invalid pseudo header in trailers") ) @@ -2070,7 +2456,7 @@ func (t *Transport) getBodyWriterState(cs *clientStream, body io.Reader) (s body } s.delay = t.expectContinueTimeout() if s.delay == 0 || - !httplex.HeaderValuesContainsToken( + !httpguts.HeaderValuesContainsToken( cs.req.Header["Expect"], "100-continue") { return @@ -2125,5 +2511,93 @@ func (s bodyWriterState) scheduleBodyWrite() { // isConnectionCloseRequest reports whether req should use its own // connection for a single request and then close the connection. func isConnectionCloseRequest(req *http.Request) bool { - return req.Close || httplex.HeaderValuesContainsToken(req.Header["Connection"], "close") + return req.Close || httpguts.HeaderValuesContainsToken(req.Header["Connection"], "close") +} + +// registerHTTPSProtocol calls Transport.RegisterProtocol but +// converting panics into errors. +func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err error) { + defer func() { + if e := recover(); e != nil { + err = fmt.Errorf("%v", e) + } + }() + t.RegisterProtocol("https", rt) + return nil +} + +// noDialH2RoundTripper is a RoundTripper which only tries to complete the request +// if there's already has a cached connection to the host. 
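registerHTTPSProtocol above exists because http.Transport.RegisterProtocol panics if a scheme is already registered, and noDialH2RoundTripper turns the absence of a cached HTTP/2 connection into http.ErrSkipAltProtocol, which makes net/http retry the request on the wrapped HTTP/1 transport. A hedged sketch of that fallback pattern, using an invented fallbackRoundTripper type rather than the package's own:

package main

import (
	"fmt"
	"net/http"
)

// fallbackRoundTripper stands in for an alternate-protocol RoundTripper.
// Returning http.ErrSkipAltProtocol tells net/http to fall back to the
// Transport the protocol was registered on.
type fallbackRoundTripper struct{}

func (fallbackRoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
	// The real code falls back only when no cached HTTP/2 connection exists
	// for req's host; this sketch always falls back.
	return nil, http.ErrSkipAltProtocol
}

// registerHTTPS converts RegisterProtocol's panic on duplicate registration
// into an ordinary error, the same shape as the patch's registerHTTPSProtocol.
func registerHTTPS(t *http.Transport) (err error) {
	defer func() {
		if e := recover(); e != nil {
			err = fmt.Errorf("%v", e)
		}
	}()
	t.RegisterProtocol("https", fallbackRoundTripper{})
	return nil
}

func main() {
	t := &http.Transport{}
	fmt.Println(registerHTTPS(t)) // <nil>
	fmt.Println(registerHTTPS(t)) // second call reports the duplicate instead of panicking
}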
+// (The field is exported so it can be accessed via reflect from net/http; tested +// by TestNoDialH2RoundTripperType) +type noDialH2RoundTripper struct{ *Transport } + +func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) { + res, err := rt.Transport.RoundTrip(req) + if isNoCachedConnError(err) { + return nil, http.ErrSkipAltProtocol + } + return res, err +} + +func (t *Transport) idleConnTimeout() time.Duration { + if t.t1 != nil { + return t.t1.IdleConnTimeout + } + return 0 +} + +func traceGetConn(req *http.Request, hostPort string) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GetConn == nil { + return + } + trace.GetConn(hostPort) +} + +func traceGotConn(req *http.Request, cc *ClientConn) { + trace := httptrace.ContextClientTrace(req.Context()) + if trace == nil || trace.GotConn == nil { + return + } + ci := httptrace.GotConnInfo{Conn: cc.tconn} + cc.mu.Lock() + ci.Reused = cc.nextStreamID > 1 + ci.WasIdle = len(cc.streams) == 0 && ci.Reused + if ci.WasIdle && !cc.lastActive.IsZero() { + ci.IdleTime = time.Now().Sub(cc.lastActive) + } + cc.mu.Unlock() + + trace.GotConn(ci) +} + +func traceWroteHeaders(trace *httptrace.ClientTrace) { + if trace != nil && trace.WroteHeaders != nil { + trace.WroteHeaders() + } +} + +func traceGot100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Got100Continue != nil { + trace.Got100Continue() + } +} + +func traceWait100Continue(trace *httptrace.ClientTrace) { + if trace != nil && trace.Wait100Continue != nil { + trace.Wait100Continue() + } +} + +func traceWroteRequest(trace *httptrace.ClientTrace, err error) { + if trace != nil && trace.WroteRequest != nil { + trace.WroteRequest(httptrace.WroteRequestInfo{Err: err}) + } +} + +func traceFirstResponseByte(trace *httptrace.ClientTrace) { + if trace != nil && trace.GotFirstResponseByte != nil { + trace.GotFirstResponseByte() + } } diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go index 6b0dfae31..3849bc263 100644 --- a/vendor/golang.org/x/net/http2/write.go +++ b/vendor/golang.org/x/net/http2/write.go @@ -10,10 +10,9 @@ import ( "log" "net/http" "net/url" - "time" + "golang.org/x/net/http/httpguts" "golang.org/x/net/http2/hpack" - "golang.org/x/net/lex/httplex" ) // writeFramer is implemented by any type that is used to write frames. @@ -90,11 +89,7 @@ type writeGoAway struct { func (p *writeGoAway) writeFrame(ctx writeContext) error { err := ctx.Framer().WriteGoAway(p.maxStreamID, p.code, nil) - if p.code != 0 { - ctx.Flush() // ignore error: we're hanging up on them anyway - time.Sleep(50 * time.Millisecond) - ctx.CloseConn() - } + ctx.Flush() // ignore error: we're hanging up on them anyway return err } @@ -204,7 +199,7 @@ func (w *writeResHeaders) staysWithinBuffer(max int) bool { // TODO: this is a common one. It'd be nice to return true // here and get into the fast path if we could be clever and // calculate the size fast enough, or at least a conservative - // uppper bound that usually fires. (Maybe if w.h and + // upper bound that usually fires. (Maybe if w.h and // w.trailers are nil, so we don't need to enumerate it.) // Otherwise I'm afraid that just calculating the length to // answer this question would be slower than the ~2µs benefit. @@ -334,7 +329,7 @@ func (wu writeWindowUpdate) writeFrame(ctx writeContext) error { } // encodeHeaders encodes an http.Header. If keys is not nil, then (k, h[k]) -// is encoded only only if k is in keys. 
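The trace helpers above feed httptrace.GotConnInfo with Reused, WasIdle and IdleTime derived from the ClientConn's stream table and lastActive timestamp, and fire the remaining httptrace callbacks at the matching points in the request lifecycle. A client-side usage sketch of those hooks; the URL is a placeholder:

package main

import (
	"log"
	"net/http"
	"net/http/httptrace"
)

func main() {
	trace := &httptrace.ClientTrace{
		GetConn: func(hostPort string) { log.Println("getting conn for", hostPort) },
		GotConn: func(info httptrace.GotConnInfo) {
			log.Printf("got conn: reused=%v wasIdle=%v idleTime=%v",
				info.Reused, info.WasIdle, info.IdleTime)
		},
		WroteHeaders:         func() { log.Println("wrote request headers") },
		GotFirstResponseByte: func() { log.Println("got first response byte") },
	}
	req, err := http.NewRequest("GET", "https://example.com/", nil)
	if err != nil {
		log.Fatal(err)
	}
	req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	resp.Body.Close()
}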
+// is encoded only if k is in keys. func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { if keys == nil { sorter := sorterPool.Get().(*sorter) @@ -355,7 +350,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) { } isTE := k == "transfer-encoding" for _, v := range vv { - if !httplex.ValidHeaderFieldValue(v) { + if !httpguts.ValidHeaderFieldValue(v) { // TODO: return an error? golang.org/issue/14048 // For now just omit it. continue diff --git a/vendor/golang.org/x/net/http2/writesched_priority.go b/vendor/golang.org/x/net/http2/writesched_priority.go index 01132721b..848fed6ec 100644 --- a/vendor/golang.org/x/net/http2/writesched_priority.go +++ b/vendor/golang.org/x/net/http2/writesched_priority.go @@ -53,7 +53,7 @@ type PriorityWriteSchedulerConfig struct { } // NewPriorityWriteScheduler constructs a WriteScheduler that schedules -// frames by following HTTP/2 priorities as described in RFC 7340 Section 5.3. +// frames by following HTTP/2 priorities as described in RFC 7540 Section 5.3. // If cfg is nil, default options are used. func NewPriorityWriteScheduler(cfg *PriorityWriteSchedulerConfig) WriteScheduler { if cfg == nil { |
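NewPriorityWriteScheduler above implements the RFC 7540 Section 5.3 priority tree; a server opts into it through the NewWriteScheduler hook on http2.Server, and passing a nil config keeps the defaults from PriorityWriteSchedulerConfig. A minimal server-side sketch, assuming that hook is available in this vintage of the package; the address, handler and certificate paths are placeholders:

package main

import (
	"log"
	"net/http"

	"golang.org/x/net/http2"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.Write([]byte("hello over " + r.Proto + "\n"))
	})

	srv := &http.Server{Addr: ":8443", Handler: mux}
	h2 := &http2.Server{
		// Schedule response frames by RFC 7540 stream priorities; nil means
		// the default PriorityWriteSchedulerConfig.
		NewWriteScheduler: func() http2.WriteScheduler {
			return http2.NewPriorityWriteScheduler(nil)
		},
	}
	if err := http2.ConfigureServer(srv, h2); err != nil {
		log.Fatal(err)
	}
	log.Fatal(srv.ListenAndServeTLS("cert.pem", "key.pem"))
}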