Merge branch v3.1 into v3.2

commit be13b5b55d
22 changed files with 1616 additions and 1483 deletions

@@ -1,6 +1,7 @@
 package compress
 
 import (
+    "cmp"
     "slices"
     "strconv"
     "strings"
@@ -19,7 +20,7 @@ const (
 
 type Encoding struct {
     Type string
-    Weight *float64
+    Weight float64
 }
 
 func getCompressionEncoding(acceptEncoding []string, defaultEncoding string, supportedEncodings []string) string {
@@ -42,11 +43,11 @@ func getCompressionEncoding(acceptEncoding []string, defaultEncoding string, supportedEncodings []string) string {
 
     encoding := encodings[0]
 
-    if encoding.Type == identityName && encoding.Weight != nil && *encoding.Weight == 0 {
+    if encoding.Type == identityName && encoding.Weight == 0 {
         return notAcceptable
     }
 
-    if encoding.Type == wildcardName && encoding.Weight != nil && *encoding.Weight == 0 {
+    if encoding.Type == wildcardName && encoding.Weight == 0 {
         return notAcceptable
     }
 
@@ -87,11 +88,13 @@ func parseAcceptEncoding(acceptEncoding, supportedEncodings []string) ([]Encoding, bool) {
             continue
         }
 
-        var weight *float64
+        // If no "q" parameter is present, the default weight is 1.
+        // https://www.rfc-editor.org/rfc/rfc9110.html#name-quality-values
+        weight := 1.0
         if len(parsed) > 1 && strings.HasPrefix(parsed[1], "q=") {
             w, _ := strconv.ParseFloat(strings.TrimPrefix(parsed[1], "q="), 64)
 
-            weight = &w
+            weight = w
             hasWeight = true
         }
 
@@ -102,41 +105,9 @@ func parseAcceptEncoding(acceptEncoding, supportedEncodings []string) ([]Encoding, bool) {
         }
     }
 
-    slices.SortFunc(encodings, compareEncoding)
+    slices.SortFunc(encodings, func(a, b Encoding) int {
+        return cmp.Compare(b.Weight, a.Weight)
+    })
 
     return encodings, hasWeight
 }
-
-func compareEncoding(a, b Encoding) int {
-    lhs, rhs := a.Weight, b.Weight
-
-    if lhs == nil && rhs == nil {
-        return 0
-    }
-
-    if lhs == nil && *rhs == 0 {
-        return -1
-    }
-
-    if lhs == nil {
-        return 1
-    }
-
-    if rhs == nil && *lhs == 0 {
-        return 1
-    }
-
-    if rhs == nil {
-        return -1
-    }
-
-    if *lhs < *rhs {
-        return 1
-    }
-
-    if *lhs > *rhs {
-        return -1
-    }
-
-    return 0
-}

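The hunks above replace the pointer-based Weight and its nil-aware comparator with a plain float64 that defaults to 1 when no q-value is given (per the RFC 9110 link in the diff), sorted descending with cmp.Compare. Below is a minimal standalone sketch of that rule, with illustrative names rather than the package's real helpers; it reproduces the new "mixed weight" behaviour where a bare gzip outranks br;q=0.9:

package main

import (
    "cmp"
    "fmt"
    "slices"
    "strconv"
    "strings"
)

// encoding mirrors the simplified Encoding shape from the diff:
// a plain float64 weight that defaults to 1 when no q-value is present.
type encoding struct {
    typ    string
    weight float64
}

func main() {
    // "gzip, br;q=0.9": gzip carries no q-value, so it gets the
    // default of 1 and outranks br once sorted.
    var encodings []encoding
    for _, part := range strings.Split("gzip, br;q=0.9", ",") {
        fields := strings.Split(strings.TrimSpace(part), ";")
        weight := 1.0
        if len(fields) > 1 && strings.HasPrefix(fields[1], "q=") {
            weight, _ = strconv.ParseFloat(strings.TrimPrefix(fields[1], "q="), 64)
        }
        encodings = append(encodings, encoding{typ: fields[0], weight: weight})
    }

    // Same descending sort as the diff: cmp.Compare on the weights, reversed.
    slices.SortFunc(encodings, func(a, b encoding) int {
        return cmp.Compare(b.weight, a.weight)
    })

    fmt.Println(encodings) // [{gzip 1} {br 0.9}]
}
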
@@ -87,6 +87,12 @@ func Test_getCompressionEncoding(t *testing.T) {
             supportedEncodings: []string{zstdName, brotliName},
             expected: brotliName,
         },
+        {
+            desc: "mixed weight",
+            acceptEncoding: []string{"gzip, br;q=0.9"},
+            supportedEncodings: []string{gzipName, brotliName},
+            expected: gzipName,
+        },
     }
 
     for _, test := range testCases {
@@ -116,10 +122,10 @@ func Test_parseAcceptEncoding(t *testing.T) {
             desc: "weight",
             values: []string{"br;q=1.0, zstd;q=0.9, gzip;q=0.8, *;q=0.1"},
             expected: []Encoding{
-                {Type: brotliName, Weight: ptr[float64](1)},
-                {Type: zstdName, Weight: ptr(0.9)},
-                {Type: gzipName, Weight: ptr(0.8)},
-                {Type: wildcardName, Weight: ptr(0.1)},
+                {Type: brotliName, Weight: 1},
+                {Type: zstdName, Weight: 0.9},
+                {Type: gzipName, Weight: 0.8},
+                {Type: wildcardName, Weight: 0.1},
             },
             assertWeight: assert.True,
         },
@@ -128,9 +134,9 @@ func Test_parseAcceptEncoding(t *testing.T) {
             values: []string{"br;q=1.0, zstd;q=0.9, gzip;q=0.8, *;q=0.1"},
             supportedEncodings: []string{brotliName, gzipName},
             expected: []Encoding{
-                {Type: brotliName, Weight: ptr[float64](1)},
-                {Type: gzipName, Weight: ptr(0.8)},
-                {Type: wildcardName, Weight: ptr(0.1)},
+                {Type: brotliName, Weight: 1},
+                {Type: gzipName, Weight: 0.8},
+                {Type: wildcardName, Weight: 0.1},
             },
             assertWeight: assert.True,
         },
@@ -138,10 +144,10 @@ func Test_parseAcceptEncoding(t *testing.T) {
             desc: "mixed",
             values: []string{"zstd,gzip, br;q=1.0, *;q=0"},
             expected: []Encoding{
-                {Type: brotliName, Weight: ptr[float64](1)},
-                {Type: zstdName},
-                {Type: gzipName},
-                {Type: wildcardName, Weight: ptr[float64](0)},
+                {Type: zstdName, Weight: 1},
+                {Type: gzipName, Weight: 1},
+                {Type: brotliName, Weight: 1},
+                {Type: wildcardName, Weight: 0},
             },
             assertWeight: assert.True,
         },
@@ -150,8 +156,8 @@ func Test_parseAcceptEncoding(t *testing.T) {
             values: []string{"zstd,gzip, br;q=1.0, *;q=0"},
             supportedEncodings: []string{zstdName},
             expected: []Encoding{
-                {Type: zstdName},
-                {Type: wildcardName, Weight: ptr[float64](0)},
+                {Type: zstdName, Weight: 1},
+                {Type: wildcardName, Weight: 0},
             },
             assertWeight: assert.True,
         },
@@ -159,10 +165,10 @@ func Test_parseAcceptEncoding(t *testing.T) {
             desc: "no weight",
             values: []string{"zstd, gzip, br, *"},
             expected: []Encoding{
-                {Type: zstdName},
-                {Type: gzipName},
-                {Type: brotliName},
-                {Type: wildcardName},
+                {Type: zstdName, Weight: 1},
+                {Type: gzipName, Weight: 1},
+                {Type: brotliName, Weight: 1},
+                {Type: wildcardName, Weight: 1},
             },
             assertWeight: assert.False,
         },
@@ -171,8 +177,8 @@ func Test_parseAcceptEncoding(t *testing.T) {
             values: []string{"zstd, gzip, br, *"},
             supportedEncodings: []string{"gzip"},
             expected: []Encoding{
-                {Type: gzipName},
-                {Type: wildcardName},
+                {Type: gzipName, Weight: 1},
+                {Type: wildcardName, Weight: 1},
             },
             assertWeight: assert.False,
         },
@@ -180,9 +186,9 @@ func Test_parseAcceptEncoding(t *testing.T) {
             desc: "weight and identity",
             values: []string{"gzip;q=1.0, identity; q=0.5, *;q=0"},
             expected: []Encoding{
-                {Type: gzipName, Weight: ptr[float64](1)},
-                {Type: identityName, Weight: ptr(0.5)},
-                {Type: wildcardName, Weight: ptr[float64](0)},
+                {Type: gzipName, Weight: 1},
+                {Type: identityName, Weight: 0.5},
+                {Type: wildcardName, Weight: 0},
             },
             assertWeight: assert.True,
         },
@@ -191,8 +197,8 @@ func Test_parseAcceptEncoding(t *testing.T) {
             values: []string{"gzip;q=1.0, identity; q=0.5, *;q=0"},
             supportedEncodings: []string{"br"},
             expected: []Encoding{
-                {Type: identityName, Weight: ptr(0.5)},
-                {Type: wildcardName, Weight: ptr[float64](0)},
+                {Type: identityName, Weight: 0.5},
+                {Type: wildcardName, Weight: 0},
             },
             assertWeight: assert.True,
         },
@@ -213,7 +219,3 @@ func Test_parseAcceptEncoding(t *testing.T) {
         })
     }
 }
-
-func ptr[T any](t T) *T {
-    return &t
-}

@@ -688,39 +688,32 @@ func Test1xxResponses(t *testing.T) {
     assert.NotEqualValues(t, body, fakeBody)
 }
 
-func BenchmarkCompress(b *testing.B) {
+func BenchmarkCompressGzip(b *testing.B) {
+    runCompressionBenchmark(b, gzipName)
+}
+
+func BenchmarkCompressBrotli(b *testing.B) {
+    runCompressionBenchmark(b, brotliName)
+}
+
+func BenchmarkCompressZstandard(b *testing.B) {
+    runCompressionBenchmark(b, zstdName)
+}
+
+func runCompressionBenchmark(b *testing.B, algorithm string) {
+    b.Helper()
+
     testCases := []struct {
         name string
         parallel bool
         size int
     }{
-        {
-            name: "2k",
-            size: 2048,
-        },
-        {
-            name: "20k",
-            size: 20480,
-        },
-        {
-            name: "100k",
-            size: 102400,
-        },
-        {
-            name: "2k parallel",
-            parallel: true,
-            size: 2048,
-        },
-        {
-            name: "20k parallel",
-            parallel: true,
-            size: 20480,
-        },
-        {
-            name: "100k parallel",
-            parallel: true,
-            size: 102400,
-        },
+        {"2k", false, 2048},
+        {"20k", false, 20480},
+        {"100k", false, 102400},
+        {"2k parallel", true, 2048},
+        {"20k parallel", true, 20480},
+        {"100k parallel", true, 102400},
     }
 
     for _, test := range testCases {
@@ -734,7 +727,7 @@ func BenchmarkCompress(b *testing.B) {
             handler, _ := New(context.Background(), next, dynamic.Compress{}, "testing")
 
             req, _ := http.NewRequest(http.MethodGet, "/whatever", nil)
-            req.Header.Set("Accept-Encoding", "gzip")
+            req.Header.Set("Accept-Encoding", algorithm)
 
             b.ReportAllocs()
             b.SetBytes(int64(test.size))
@@ -742,7 +735,7 @@ func BenchmarkCompress(b *testing.B) {
                b.ResetTimer()
                b.RunParallel(func(pb *testing.PB) {
                    for pb.Next() {
-                       runBenchmark(b, req, handler)
+                       runBenchmark(b, req, handler, algorithm)
                    }
                })
                return
@@ -750,13 +743,13 @@ func BenchmarkCompress(b *testing.B) {
 
            b.ResetTimer()
            for range b.N {
-               runBenchmark(b, req, handler)
+               runBenchmark(b, req, handler, algorithm)
            }
        })
    }
 }
 
-func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
+func runBenchmark(b *testing.B, req *http.Request, handler http.Handler, algorithm string) {
    b.Helper()
 
    res := httptest.NewRecorder()
@@ -765,7 +758,7 @@ func runBenchmark(b *testing.B, req *http.Request, handler http.Handler) {
        b.Fatalf("Expected 200 but got %d", code)
    }
 
-   assert.Equal(b, gzipName, res.Header().Get(contentEncodingHeader))
+   assert.Equal(b, algorithm, res.Header().Get(contentEncodingHeader))
 }
 
 func generateBytes(length int) []byte {

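With the benchmark split into per-algorithm entry points that share runCompressionBenchmark, each codec can be measured or compared in isolation. Assuming the usual Traefik package layout (the path is not shown in this diff), an invocation would look like:

go test -run '^$' -bench 'BenchmarkCompress(Gzip|Brotli|Zstandard)' -benchmem ./pkg/middlewares/compress/

Here -run '^$' skips the unit tests and -benchmem adds allocation figures to the output.
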
@@ -8,6 +8,7 @@ import (
     "mime"
     "net"
     "net/http"
+    "sync"
 
     "github.com/andybalholm/brotli"
     "github.com/klauspost/compress/zstd"
@@ -45,6 +46,7 @@ type CompressionHandler struct {
     excludedContentTypes []parsedContentType
     includedContentTypes []parsedContentType
     next http.Handler
+    writerPool sync.Pool
 }
 
 // NewCompressionHandler returns a new compressing handler.
@@ -92,7 +94,7 @@ func NewCompressionHandler(cfg Config, next http.Handler) (http.Handler, error)
 func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) {
     rw.Header().Add(vary, acceptEncoding)
 
-    compressionWriter, err := newCompressionWriter(c.cfg.Algorithm, rw)
+    compressionWriter, err := c.getCompressionWriter(rw)
     if err != nil {
         logger := middlewares.GetLogger(r.Context(), c.cfg.MiddlewareName, typeName)
         logger.Debug().Msgf("Create compression handler: %v", err)
@@ -100,6 +102,7 @@ func (c *CompressionHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request)
         rw.WriteHeader(http.StatusInternalServerError)
         return
     }
+    defer c.putCompressionWriter(compressionWriter)
 
     responseWriter := &responseWriter{
         rw: rw,
@@ -130,6 +133,8 @@ type compression interface {
     // as it would otherwise send some extra "end of compression" bytes.
     // Close also makes sure to flush whatever was left to write from the buffer.
     Close() error
+    // Reset reinitializes the state of the encoder, allowing it to be reused.
+    Reset(w io.Writer)
 }
 
 type compressionWriter struct {
@@ -137,6 +142,19 @@ type compressionWriter struct {
     alg string
 }
 
+func (c *CompressionHandler) getCompressionWriter(rw io.Writer) (*compressionWriter, error) {
+    if writer, ok := c.writerPool.Get().(*compressionWriter); ok {
+        writer.compression.Reset(rw)
+        return writer, nil
+    }
+    return newCompressionWriter(c.cfg.Algorithm, rw)
+}
+
+func (c *CompressionHandler) putCompressionWriter(writer *compressionWriter) {
+    writer.Reset(nil)
+    c.writerPool.Put(writer)
+}
+
 func newCompressionWriter(algo string, in io.Writer) (*compressionWriter, error) {
     switch algo {
     case brotliName:

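The writerPool change follows the standard sync.Pool reuse pattern: Get an idle writer and Reset it onto the current ResponseWriter, then Reset it away from the response and Put it back once the request is done. Below is a generic sketch of the same pooling technique built on the standard library's compress/gzip writer rather than Traefik's compressionWriter type; it always compresses and skips the Accept-Encoding and content-type checks the real handler performs:

package main

import (
    "compress/gzip"
    "io"
    "net/http"
    "sync"
)

// A pool of reusable *gzip.Writer values, so each request reuses the
// compressor's internal buffers instead of allocating fresh ones.
var gzipPool = sync.Pool{
    New: func() any { return gzip.NewWriter(io.Discard) },
}

// gzipResponseWriter forwards response bytes through the pooled compressor.
type gzipResponseWriter struct {
    http.ResponseWriter
    w io.Writer
}

func (g *gzipResponseWriter) Write(p []byte) (int, error) { return g.w.Write(p) }

func gzipMiddleware(next http.Handler) http.Handler {
    return http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
        gw := gzipPool.Get().(*gzip.Writer)
        gw.Reset(rw) // point the pooled writer at this response
        defer func() {
            _ = gw.Close()       // flush the trailing compressed bytes
            gw.Reset(io.Discard) // drop the reference to rw before pooling
            gzipPool.Put(gw)
        }()

        rw.Header().Set("Content-Encoding", "gzip")
        next.ServeHTTP(&gzipResponseWriter{ResponseWriter: rw, w: gw})
    })
}

func main() {
    hello := http.HandlerFunc(func(rw http.ResponseWriter, _ *http.Request) {
        _, _ = rw.Write([]byte("hello, compressed world"))
    })
    _ = http.ListenAndServe(":8080", gzipMiddleware(hello))
}
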
@@ -8,6 +8,7 @@ import (
     "os"
     "path/filepath"
     "reflect"
+    "runtime"
     "strings"
 
     "github.com/http-wasm/http-wasm-host-go/handler"
@@ -135,7 +136,19 @@ func (b *wasmMiddlewareBuilder) buildMiddleware(ctx context.Context, next http.H
         return nil, nil, fmt.Errorf("creating middleware: %w", err)
     }
 
-    return mw.NewHandler(ctx, next), applyCtx, nil
+    h := mw.NewHandler(ctx, next)
+
+    // Traefik does not Close the middleware when creating a new instance on a configuration change.
+    // When the middleware is marked to be GC, we need to close it so the wasm instance is properly closed.
+    // Reference: https://github.com/traefik/traefik/issues/11119
+    runtime.SetFinalizer(h, func(_ http.Handler) {
+        if err := mw.Close(ctx); err != nil {
+            logger.Err(err).Msg("[wasm] middleware Close failed")
+        } else {
+            logger.Debug().Msg("[wasm] middleware Close ok")
+        }
+    })
+    return h, applyCtx, nil
 }
 
 // WasmMiddleware is an HTTP handler plugin wrapper.
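The wasm change ties resource cleanup to garbage collection with runtime.SetFinalizer, since Traefik never explicitly closes the old handler after a configuration reload. A minimal sketch of that pattern is below (illustrative types, not the plugin's real ones); note that finalizers run at some point after the object becomes unreachable and are not guaranteed to execute before the process exits:

package main

import (
    "fmt"
    "runtime"
    "time"
)

// closer stands in for a handler that owns an external resource
// (such as a wasm instance) that must be released explicitly.
type closer struct{ name string }

func (c *closer) Close() { fmt.Println("closed:", c.name) }

func main() {
    h := &closer{name: "wasm-mw"}

    // Ask the runtime to release the resource once the handler
    // becomes unreachable, mirroring the pattern in the diff.
    runtime.SetFinalizer(h, func(c *closer) { c.Close() })

    h = nil      // drop the last reference
    runtime.GC() // finalizers are queued after a collection...

    // ...and run on a dedicated goroutine, with no ordering guarantee;
    // the sleep just makes the demo observable before main returns.
    time.Sleep(100 * time.Millisecond)
}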