Merge branch v3.3 into master

Commit f0849e8ee6
23 changed files with 470 additions and 191 deletions

@ -180,3 +180,10 @@ please use respectively `acme.dnsChallenge.propagation.delayBeforeChecks` and `a

In `v3.3`, the `tracing.globalAttributes` option has been deprecated; please use the `tracing.resourceAttributes` option instead.
The name of the `tracing.globalAttributes` option is misleading, as it does not reflect that the option adds resource attributes to be sent to the collector.
The option will be removed in the next major version.
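
For illustration, a minimal migration sketch in YAML static configuration; the attribute name and value (`attr1: production`) are placeholders, not taken from the commit:

```yaml
tracing:
  # Deprecated since v3.3:
  # globalAttributes:
  #   attr1: production
  # Replacement:
  resourceAttributes:
    attr1: production
```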

## v3.3.4

### OpenTelemetry Request Duration metric

In `v3.3.4`, the unit of the OpenTelemetry Request Duration metric (named `traefik_(entrypoint|router|service)_request_duration_seconds`) has been changed from milliseconds to seconds.
To be consistent with its name and with other metrics providers, the metric now reports the duration in seconds.
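
Purely as an illustration (not part of the commit), the same observed latency of 250 ms as it is recorded before and after this change:

```yaml
# Traefik <= v3.3.3 reported the duration in milliseconds:
#   traefik_entrypoint_request_duration_seconds = 250
# Traefik >= v3.3.4 reports the duration in seconds:
#   traefik_entrypoint_request_duration_seconds = 0.25
```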

@ -163,7 +163,7 @@ tracing:

#### `safeQueryParams`

_Optional, Default={}_
_Optional, Default=[]_

By default, all query parameters are redacted.
Defines the list of query parameters to not redact.
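
A minimal sketch of this option in the YAML static configuration; the parameter names `q` and `lang` are placeholders:

```yaml
tracing:
  # Keep these query parameters visible in span attributes;
  # every other query parameter stays redacted.
  safeQueryParams:
    - q
    - lang
```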

@ -57,6 +57,7 @@ additionalArguments:

| Field | Description | Default | Required |
|:------|:------------|:--------|:---------|
| `address` | Define the port, and optionally the hostname, on which to listen for incoming connections and packets.<br /> It also defines the protocol to use (TCP or UDP).<br /> If no protocol is specified, the default is TCP. The format is: `[host]:port[/tcp\|/udp]`. | - | Yes |
| `accessLogs` | Defines whether a router attached to this EntryPoint produces access logs by default. Nonetheless, a router defining its own observability configuration will opt out of this default. | true | No |
| `asDefault` | Mark the `entryPoint` to be in the list of default `entryPoints`.<br /> `entryPoints` in this list are used (by default) on HTTP and TCP routers that do not define their own `entryPoints` option.<br /> More information [here](#asdefault). | false | No |
| `forwardedHeaders.trustedIPs` | Set the IPs or CIDRs from which Traefik trusts the forwarded headers information (`X-Forwarded-*`). | - | No |
| `forwardedHeaders.insecure` | Set the insecure mode to always trust the forwarded headers information (`X-Forwarded-*`).<br />We recommend using this option only for testing purposes, not in production. | false | No |

@ -72,9 +73,11 @@ additionalArguments:

| `http2.maxConcurrentStreams` | Set the number of concurrent streams per connection that each client is allowed to initiate. <br /> The value must be greater than zero. | 250 | No |
| `http3` | Enable HTTP/3 protocol on the `entryPoint`. <br /> HTTP/3 requires a TCP `entryPoint`, as HTTP/3 always starts as a TCP connection that then gets upgraded to UDP. In most scenarios, this `entryPoint` is the same as the one used for TLS traffic.<br /> More information [here](#http3). | - | No |
| `http3.advertisedPort` | Set the UDP port to advertise as the HTTP/3 authority. <br /> It defaults to the entryPoint's address port. <br /> It can be used to override the authority in the `alt-svc` header, for example if the public-facing port is different from where Traefik is listening. | - | No |
| `metrics` | Defines whether a router attached to this EntryPoint produces metrics by default. Nonetheless, a router defining its own observability configuration will opt out of this default. | true | No |
| `proxyProtocol.trustedIPs` | Enable PROXY protocol with Trusted IPs. <br /> Traefik supports [PROXY protocol](https://www.haproxy.org/download/2.0/doc/proxy-protocol.txt) versions 1 and 2. <br /> If PROXY protocol header parsing is enabled for the entry point, this entry point can accept connections with or without PROXY protocol headers. <br /> If the PROXY protocol header is passed, then the version is determined automatically.<br /> More information [here](#proxyprotocol-and-load-balancers). | - | No |
| `proxyProtocol.insecure` | Enable PROXY protocol trusting every incoming connection. <br /> Every remote client address will be replaced (`trustedIPs` won't have any effect). <br /> Traefik supports [PROXY protocol](https://www.haproxy.org/download/2.0/doc/proxy-protocol.txt) versions 1 and 2. <br /> If PROXY protocol header parsing is enabled for the entry point, this entry point can accept connections with or without PROXY protocol headers. <br /> If the PROXY protocol header is passed, then the version is determined automatically.<br />We recommend using this option only for testing purposes, not in production.<br /> More information [here](#proxyprotocol-and-load-balancers). | - | No |
| `reusePort` | Enable `entryPoints` from the same or different processes listening on the same TCP/UDP port by utilizing the `SO_REUSEPORT` socket option. <br /> It also allows the kernel to act like a load balancer to distribute incoming connections between entry points.<br /> More information [here](#reuseport). | false | No |
| `tracing` | Defines whether a router attached to this EntryPoint produces traces by default. Nonetheless, a router defining its own observability configuration will opt out of this default. | true | No |
| `transport.`<br />`respondingTimeouts.`<br />`readTimeout` | Set the timeouts for incoming requests to the Traefik instance. This is the maximum duration for reading the entire request, including the body. Setting them has no effect for UDP `entryPoints`.<br /> If zero, no timeout exists. <br />Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).<br />If no units are provided, the value is parsed assuming seconds. | 60s (seconds) | No |
| `transport.`<br />`respondingTimeouts.`<br />`writeTimeout` | Maximum duration before timing out writes of the response. <br /> It covers the time from the end of the request header read to the end of the response write. <br /> If zero, no timeout exists. <br />Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).<br />If no units are provided, the value is parsed assuming seconds. | 0s (seconds) | No |
| `transport.`<br />`respondingTimeouts.`<br />`idleTimeout` | Maximum duration an idle (keep-alive) connection will remain idle before closing itself. <br /> If zero, no timeout exists. <br />Can be provided in a format supported by [time.ParseDuration](https://golang.org/pkg/time/#ParseDuration) or as raw values (digits).<br />If no units are provided, the value is parsed assuming seconds. | 180s (seconds) | No |
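
To tie the options above together, a minimal sketch of an `entryPoints` static configuration in YAML; the entry point names and the trusted CIDR are illustrative, not taken from the commit:

```yaml
entryPoints:
  web:
    address: ":80"
    asDefault: true
    forwardedHeaders:
      trustedIPs:
        - "10.0.0.0/8"
  websecure:
    address: ":443"
    http3: {}
    transport:
      respondingTimeouts:
        readTimeout: 60s
        idleTimeout: 180s
```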

@ -36,27 +36,27 @@ tracing: {}

## Configuration Options

| Field | Description | Default | Required |
|:-----------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:--------|:---------|
| `tracing.addInternals` | Enables tracing for internal resources (e.g.: `ping@internal`). | false | No |
| `tracing.serviceName` | Service name used in the selected backend. | "traefik" | No |
| `tracing.sampleRate` | The proportion of requests to trace, specified between 0.0 and 1.0. | 1.0 | No |
| `tracing.globalAttributes` | Applies a list of shared key:value attributes on all spans. | {} | No |
| `tracing.capturedRequestHeaders` | Defines the list of request headers to add as attributes.<br />It applies to client and server kind spans. | {} | No |
| `tracing.capturedResponseHeaders` | Defines the list of response headers to add as attributes.<br />It applies to client and server kind spans. | {} | No |
| `tracing.safeQueryParams` | By default, all query parameters are redacted.<br />Defines the list of query parameters to not redact. | {} | No |
| `tracing.otlp.http` | This instructs the exporter to send the tracing to the OpenTelemetry Collector using HTTP.<br /> Setting this option enables the sub-options with their default values. | null/false | No |
| `tracing.otlp.http.endpoint` | URL of the OpenTelemetry Collector to send tracing to.<br /> Format="`<scheme>://<host>:<port><path>`" | "http://localhost:4318/v1/traces" | Yes |
| `tracing.otlp.http.headers` | Additional headers sent with tracing by the exporter to the OpenTelemetry Collector. | | No |
| `tracing.otlp.http.tls.ca` | Path to the certificate authority used for the secure connection to the OpenTelemetry Collector. It defaults to the system bundle. | "" | No |
| `tracing.otlp.http.tls.cert` | Path to the public certificate used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `key` option is required. | "" | No |
| `tracing.otlp.http.tls.key` | Path to the private key used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `cert` option is required. | "" | No |
| `tracing.otlp.http.tls.insecureskipverify` | If `insecureSkipVerify` is `true`, the TLS connection to the OpenTelemetry Collector accepts any certificate presented by the server regardless of the hostnames it covers. | false | Yes |
| `tracing.otlp.grpc` | This instructs the exporter to send tracing to the OpenTelemetry Collector using gRPC. | false | No |
| `tracing.otlp.grpc.endpoint` | Address of the OpenTelemetry Collector to send tracing to.<br /> Format="`<host>:<port>`" | "localhost:4317" | Yes |
| `tracing.otlp.grpc.headers` | Additional headers sent with tracing by the exporter to the OpenTelemetry Collector. | {} | No |
| `tracing.otlp.grpc.insecure` | Allows the exporter to send tracing to the OpenTelemetry Collector without using a secured protocol. | false | Yes |
| `tracing.otlp.grpc.tls.ca` | Path to the certificate authority used for the secure connection to the OpenTelemetry Collector. It defaults to the system bundle. | "" | No |
| `tracing.otlp.grpc.tls.cert` | Path to the public certificate used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `key` option is required. | "" | No |
| `tracing.otlp.grpc.tls.key` | Path to the private key used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `cert` option is required. | "" | No |
| `tracing.otlp.grpc.tls.insecureskipverify` | If `insecureSkipVerify` is `true`, the TLS connection to the OpenTelemetry Collector accepts any certificate presented by the server regardless of the hostnames it covers. | false | Yes |
| Field | Description | Default | Required |
|:-------------------------------------------|:--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:-----------------------------------|:---------|
| `tracing.addInternals` | Enables tracing for internal resources (e.g.: `ping@internal`). | false | No |
| `tracing.serviceName` | Service name used in the selected backend. | "traefik" | No |
| `tracing.sampleRate` | The proportion of requests to trace, specified between 0.0 and 1.0. | 1.0 | No |
| `tracing.resourceAttributes` | Defines additional resource attributes to be sent to the collector. | [] | No |
| `tracing.capturedRequestHeaders` | Defines the list of request headers to add as attributes.<br />It applies to client and server kind spans. | [] | No |
| `tracing.capturedResponseHeaders` | Defines the list of response headers to add as attributes.<br />It applies to client and server kind spans. | [] | No |
| `tracing.safeQueryParams` | By default, all query parameters are redacted.<br />Defines the list of query parameters to not redact. | [] | No |
| `tracing.otlp.http` | This instructs the exporter to send the tracing to the OpenTelemetry Collector using HTTP.<br /> Setting this option enables the sub-options with their default values. | null/false | No |
| `tracing.otlp.http.endpoint` | URL of the OpenTelemetry Collector to send tracing to.<br /> Format="`<scheme>://<host>:<port><path>`" | "http://localhost:4318/v1/traces" | Yes |
| `tracing.otlp.http.headers` | Additional headers sent with tracing by the exporter to the OpenTelemetry Collector. | | No |
| `tracing.otlp.http.tls.ca` | Path to the certificate authority used for the secure connection to the OpenTelemetry Collector. It defaults to the system bundle. | "" | No |
| `tracing.otlp.http.tls.cert` | Path to the public certificate used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `key` option is required. | "" | No |
| `tracing.otlp.http.tls.key` | Path to the private key used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `cert` option is required. | "" | No |
| `tracing.otlp.http.tls.insecureskipverify` | If `insecureSkipVerify` is `true`, the TLS connection to the OpenTelemetry Collector accepts any certificate presented by the server regardless of the hostnames it covers. | false | Yes |
| `tracing.otlp.grpc` | This instructs the exporter to send tracing to the OpenTelemetry Collector using gRPC. | false | No |
| `tracing.otlp.grpc.endpoint` | Address of the OpenTelemetry Collector to send tracing to.<br /> Format="`<host>:<port>`" | "localhost:4317" | Yes |
| `tracing.otlp.grpc.headers` | Additional headers sent with tracing by the exporter to the OpenTelemetry Collector. | [] | No |
| `tracing.otlp.grpc.insecure` | Allows the exporter to send tracing to the OpenTelemetry Collector without using a secured protocol. | false | Yes |
| `tracing.otlp.grpc.tls.ca` | Path to the certificate authority used for the secure connection to the OpenTelemetry Collector. It defaults to the system bundle. | "" | No |
| `tracing.otlp.grpc.tls.cert` | Path to the public certificate used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `key` option is required. | "" | No |
| `tracing.otlp.grpc.tls.key` | Path to the private key used for the secure connection to the OpenTelemetry Collector. When using this option, setting the `cert` option is required. | "" | No |
| `tracing.otlp.grpc.tls.insecureskipverify` | If `insecureSkipVerify` is `true`, the TLS connection to the OpenTelemetry Collector accepts any certificate presented by the server regardless of the hostnames it covers. | false | Yes |
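
A minimal sketch combining the options above in YAML static configuration; the endpoint, header, and attribute values are placeholders:

```yaml
tracing:
  serviceName: traefik
  sampleRate: 1.0
  resourceAttributes:
    attr1: production
  safeQueryParams:
    - q
  otlp:
    http:
      endpoint: http://localhost:4318/v1/traces
      headers:
        Authorization: Bearer <token>
```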

@ -52,21 +52,21 @@ providers:

## Configuration Options

| Field | Description | Default | Required |
|:------|:----------------------------------------------------------|:---------------------|:---------|
| `providers.providersThrottleDuration` | Minimum amount of time to wait, after a configuration reload, before taking into account any new configuration refresh event.<br />If multiple events occur within this time, only the most recent one is taken into account, and all others are discarded.<br />**This option cannot be set per provider, but the throttling algorithm applies to each of them independently.** | 2s | No |
| `providers.kubernetesCRD.endpoint` | Server endpoint URL.<br />More information [here](#endpoint). | "" | No |
| `providers.kubernetesCRD.token` | Bearer token used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesCRD.certAuthFilePath` | Path to the certificate authority file.<br />Used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesCRD.namespaces` | Array of namespaces to watch.<br />If left empty, watch all namespaces. | {} | No |
| `providers.kubernetesCRD.labelselector` | Allow filtering on specific resource objects only using label selectors.<br />Applies only to Traefik [Custom Resources](#list-of-resources) (they all must match the filter).<br />No effect on Kubernetes `Secrets`, `EndpointSlices` and `Services`.<br />See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. | "" | No |
| `providers.kubernetesCRD.ingressClass` | Value of the `kubernetes.io/ingress.class` annotation that identifies resource objects to be processed.<br />If empty, resources missing the annotation, having an empty value, or the value `traefik` are processed. | "" | No |
| `providers.kubernetesCRD.throttleDuration` | Minimum amount of time to wait between two Kubernetes events before producing a new configuration.<br />This prevents a Kubernetes cluster that updates many times per second from continuously changing your Traefik configuration.<br />If empty, every event is caught. | 0s | No |
| `providers.kubernetesCRD.allowEmptyServices` | Allows creating a route to reach a service that has no endpoint available.<br />It allows Traefik to handle the requests and responses targeting this service (applying middleware or observability operations) before returning a `503` HTTP Status. | false | No |
| `providers.kubernetesCRD.allowCrossNamespace` | Allows the `IngressRoutes` to reference resources in namespaces other than their own. | false | No |
| `providers.kubernetesCRD.allowExternalNameServices` | Allows the `IngressRoutes` to reference ExternalName services. | false | No |
| `providers.kubernetesCRD.nativeLBByDefault` | Allow using the Kubernetes Service load balancing between the pods instead of the one provided by Traefik for every `IngressRoute` by default.<br />It can be overridden in the [`ServerTransport`](../../../../routing/services/index.md#serverstransport). | false | No |
| `providers.kubernetesCRD.disableClusterScopeResources` | Prevents discovering cluster scope resources (`IngressClass` and `Nodes`).<br />By doing so, it alleviates the requirement of giving Traefik the rights to look up cluster resources.<br />Furthermore, Traefik will not handle IngressRoutes with IngressClass references; such Ingresses will therefore be ignored (please note that annotations are not affected by this option).<br />This also prevents using the `NodePortLB` option on services. | false | No |
| Field | Description | Default | Required |
|:------|:----------------------------------------------------------|:--------|:---------|
| `providers.providersThrottleDuration` | Minimum amount of time to wait, after a configuration reload, before taking into account any new configuration refresh event.<br />If multiple events occur within this time, only the most recent one is taken into account, and all others are discarded.<br />**This option cannot be set per provider, but the throttling algorithm applies to each of them independently.** | 2s | No |
| `providers.kubernetesCRD.endpoint` | Server endpoint URL.<br />More information [here](#endpoint). | "" | No |
| `providers.kubernetesCRD.token` | Bearer token used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesCRD.certAuthFilePath` | Path to the certificate authority file.<br />Used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesCRD.namespaces` | Array of namespaces to watch.<br />If left empty, watch all namespaces. | [] | No |
| `providers.kubernetesCRD.labelselector` | Allow filtering on specific resource objects only using label selectors.<br />Applies only to Traefik [Custom Resources](#list-of-resources) (they all must match the filter).<br />No effect on Kubernetes `Secrets`, `EndpointSlices` and `Services`.<br />See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. | "" | No |
| `providers.kubernetesCRD.ingressClass` | Value of the `kubernetes.io/ingress.class` annotation that identifies resource objects to be processed.<br />If empty, resources missing the annotation, having an empty value, or the value `traefik` are processed. | "" | No |
| `providers.kubernetesCRD.throttleDuration` | Minimum amount of time to wait between two Kubernetes events before producing a new configuration.<br />This prevents a Kubernetes cluster that updates many times per second from continuously changing your Traefik configuration.<br />If empty, every event is caught. | 0s | No |
| `providers.kubernetesCRD.allowEmptyServices` | Allows creating a route to reach a service that has no endpoint available.<br />It allows Traefik to handle the requests and responses targeting this service (applying middleware or observability operations) before returning a `503` HTTP Status. | false | No |
| `providers.kubernetesCRD.allowCrossNamespace` | Allows the `IngressRoutes` to reference resources in namespaces other than their own. | false | No |
| `providers.kubernetesCRD.allowExternalNameServices` | Allows the `IngressRoutes` to reference ExternalName services. | false | No |
| `providers.kubernetesCRD.nativeLBByDefault` | Allow using the Kubernetes Service load balancing between the pods instead of the one provided by Traefik for every `IngressRoute` by default.<br />It can be overridden in the [`ServerTransport`](../../../../routing/services/index.md#serverstransport). | false | No |
| `providers.kubernetesCRD.disableClusterScopeResources` | Prevents discovering cluster scope resources (`IngressClass` and `Nodes`).<br />By doing so, it alleviates the requirement of giving Traefik the rights to look up cluster resources.<br />Furthermore, Traefik will not handle IngressRoutes with IngressClass references; such Ingresses will therefore be ignored (please note that annotations are not affected by this option).<br />This also prevents using the `NodePortLB` option on services. | false | No |
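
A minimal sketch of the `kubernetesCRD` provider in YAML static configuration; the namespace names are placeholders:

```yaml
providers:
  kubernetesCRD:
    # Watch only these namespaces instead of the whole cluster.
    namespaces:
      - default
      - apps
    throttleDuration: 2s
    allowEmptyServices: true
```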

### endpoint

@ -67,20 +67,20 @@ providers:

<!-- markdownlint-disable MD013 -->

| Field | Description | Default | Required |
|:------|:----------------------------------------------------------|:---------------------|:---------|
| `providers.providersThrottleDuration` | Minimum amount of time to wait, after a configuration reload, before taking into account any new configuration refresh event.<br />If multiple events occur within this time, only the most recent one is taken into account, and all others are discarded.<br />**This option cannot be set per provider, but the throttling algorithm applies to each of them independently.** | 2s | No |
| `providers.kubernetesGateway.endpoint` | Server endpoint URL.<br />More information [here](#endpoint). | "" | No |
| `providers.kubernetesGateway.experimentalChannel` | Toggles support for the Experimental Channel resources ([Gateway API release channels documentation](https://gateway-api.sigs.k8s.io/concepts/versioning/#release-channels)), e.g. `TCPRoute` and `TLSRoute`. | false | No |
| `providers.kubernetesGateway.token` | Bearer token used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesGateway.certAuthFilePath` | Path to the certificate authority file.<br />Used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesGateway.namespaces` | Array of namespaces to watch.<br />If left empty, watch all namespaces. | {} | No |
| `providers.kubernetesGateway.labelselector` | Allow filtering on specific resource objects only using label selectors.<br />Applies only to Traefik [Custom Resources](./kubernetes-crd.md#list-of-resources) (they all must match the filter).<br />No effect on Kubernetes `Secrets`, `EndpointSlices` and `Services`.<br />See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. | "" | No |
| `providers.kubernetesGateway.throttleDuration` | Minimum amount of time to wait between two Kubernetes events before producing a new configuration.<br />This prevents a Kubernetes cluster that updates many times per second from continuously changing your Traefik configuration.<br />If empty, every event is caught. | 0s | No |
| `providers.kubernetesGateway.nativeLBByDefault` | Defines whether to use Native Kubernetes load-balancing mode by default. For more information, please check out the `traefik.io/service.nativelb` service annotation documentation. | false | No |
| `providers.kubernetesGateway.`<br />`statusAddress.hostname` | Hostname copied to the Gateway `status.addresses`. | "" | No |
| `providers.kubernetesGateway.`<br />`statusAddress.ip` | IP address copied to the Gateway `status.addresses`, and currently only supports one IP value (IPv4 or IPv6). | "" | No |
| `providers.kubernetesGateway.`<br />`statusAddress.publishedService` | The Kubernetes service to copy status addresses from.<br />When using third-party tools like External-DNS, this option can be used to copy the service `loadbalancer.status` (containing the service's endpoints IPs) to the gateways. | "" | No |
| Field | Description | Default | Required |
|:------|:----------------------------------------------------------|:--------|:---------|
| `providers.providersThrottleDuration` | Minimum amount of time to wait, after a configuration reload, before taking into account any new configuration refresh event.<br />If multiple events occur within this time, only the most recent one is taken into account, and all others are discarded.<br />**This option cannot be set per provider, but the throttling algorithm applies to each of them independently.** | 2s | No |
| `providers.kubernetesGateway.endpoint` | Server endpoint URL.<br />More information [here](#endpoint). | "" | No |
| `providers.kubernetesGateway.experimentalChannel` | Toggles support for the Experimental Channel resources ([Gateway API release channels documentation](https://gateway-api.sigs.k8s.io/concepts/versioning/#release-channels)), e.g. `TCPRoute` and `TLSRoute`. | false | No |
| `providers.kubernetesGateway.token` | Bearer token used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesGateway.certAuthFilePath` | Path to the certificate authority file.<br />Used for the Kubernetes client configuration. | "" | No |
| `providers.kubernetesGateway.namespaces` | Array of namespaces to watch.<br />If left empty, watch all namespaces. | [] | No |
| `providers.kubernetesGateway.labelselector` | Allow filtering on specific resource objects only using label selectors.<br />Applies only to Traefik [Custom Resources](./kubernetes-crd.md#list-of-resources) (they all must match the filter).<br />No effect on Kubernetes `Secrets`, `EndpointSlices` and `Services`.<br />See [label-selectors](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/#label-selectors) for details. | "" | No |
| `providers.kubernetesGateway.throttleDuration` | Minimum amount of time to wait between two Kubernetes events before producing a new configuration.<br />This prevents a Kubernetes cluster that updates many times per second from continuously changing your Traefik configuration.<br />If empty, every event is caught. | 0s | No |
| `providers.kubernetesGateway.nativeLBByDefault` | Defines whether to use Native Kubernetes load-balancing mode by default. For more information, please check out the `traefik.io/service.nativelb` service annotation documentation. | false | No |
| `providers.kubernetesGateway.`<br />`statusAddress.hostname` | Hostname copied to the Gateway `status.addresses`. | "" | No |
| `providers.kubernetesGateway.`<br />`statusAddress.ip` | IP address copied to the Gateway `status.addresses`, and currently only supports one IP value (IPv4 or IPv6). | "" | No |
| `providers.kubernetesGateway.`<br />`statusAddress.publishedService` | The Kubernetes service to copy status addresses from.<br />When using third-party tools like External-DNS, this option can be used to copy the service `loadbalancer.status` (containing the service's endpoints IPs) to the gateways. | "" | No |

<!-- markdownlint-enable MD013 -->
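
A minimal sketch of the `kubernetesGateway` provider in YAML static configuration; the namespace and the published service reference are placeholders:

```yaml
providers:
  kubernetesGateway:
    # Opt in to Experimental Channel resources such as TCPRoute and TLSRoute.
    experimentalChannel: true
    namespaces:
      - default
    statusAddress:
      publishedService: traefik/traefik
```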

go.mod

@ -6,7 +6,7 @@ require (
github.com/BurntSushi/toml v1.4.0
github.com/Masterminds/sprig/v3 v3.2.3
github.com/abbot/go-http-auth v0.0.0-00010101000000-000000000000 // No tag on the repo.
github.com/andybalholm/brotli v1.1.0
github.com/andybalholm/brotli v1.1.1
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5
github.com/aws/aws-sdk-go v1.44.327
github.com/cenkalti/backoff/v4 v4.3.0

@ -33,7 +33,7 @@ require (
github.com/http-wasm/http-wasm-host-go v0.7.0
github.com/influxdata/influxdb-client-go/v2 v2.7.0
github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab // No tag on the repo.
github.com/klauspost/compress v1.17.11-0.20241004063537-dbd6c381492a // Required to have the content-type fix: https://github.com/klauspost/compress/pull/1013
github.com/klauspost/compress v1.17.11
github.com/kvtools/consul v1.0.2
github.com/kvtools/etcdv3 v1.0.2
github.com/kvtools/redis v1.1.0

@ -67,7 +67,7 @@ require (
github.com/traefik/yaegi v0.16.1
github.com/unrolled/render v1.0.2
github.com/unrolled/secure v1.0.9
github.com/valyala/fasthttp v1.55.0
github.com/valyala/fasthttp v1.58.0
github.com/vulcand/oxy/v2 v2.0.0
github.com/vulcand/predicate v1.2.0
go.opentelemetry.io/collector/pdata v1.10.0

go.sum
|
@ -135,8 +135,8 @@ github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.63.72 h1:HvFZUzEbNvfe8F2Mg0wBGv90bPhWDxgVtDHR5zoBOU0=
|
||||
github.com/aliyun/alibaba-cloud-sdk-go v1.63.72/go.mod h1:SOSDHfe1kX91v3W5QiBsWSLqeLxImobbMX1mxrFHsVQ=
|
||||
github.com/andybalholm/brotli v1.1.0 h1:eLKJA0d02Lf0mVpIDgYnqXcUn0GqVmEFny3VuID1U3M=
|
||||
github.com/andybalholm/brotli v1.1.0/go.mod h1:sms7XGricyQI9K10gOSf56VKKWS4oLer58Q+mhRPtnY=
|
||||
github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA=
|
||||
github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
|
@ -713,8 +713,8 @@ github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYs
|
|||
github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg=
|
||||
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
|
||||
github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
|
||||
github.com/klauspost/compress v1.17.11-0.20241004063537-dbd6c381492a h1:cwHOqPB4H4iQq8177kf2SxpjNbcjJ2m3lNwKIe28Hqg=
|
||||
github.com/klauspost/compress v1.17.11-0.20241004063537-dbd6c381492a/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
|
||||
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
|
@ -1218,8 +1218,8 @@ github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc=
|
|||
github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4=
|
||||
github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
github.com/valyala/fasthttp v1.55.0 h1:Zkefzgt6a7+bVKHnu/YaYSOPfNYNisSVBo/unVCf8k8=
|
||||
github.com/valyala/fasthttp v1.55.0/go.mod h1:NkY9JtkrpPKmgwV3HTaS2HWaJss9RSIsRVfcxxoHiOM=
|
||||
github.com/valyala/fasthttp v1.58.0 h1:GGB2dWxSbEprU9j0iMJHgdKYJVDyjrOwF9RE59PbRuE=
|
||||
github.com/valyala/fasthttp v1.58.0/go.mod h1:SYXvHHaFp7QZHGKSHmoMipInhrI5StHrhDTYVEjK/Kw=
|
||||
github.com/valyala/fasttemplate v1.0.1/go.mod h1:UQGH1tvbgY+Nz5t2n7tXsz52dQxojPUpymEIMZ47gx8=
|
||||
github.com/valyala/fasttemplate v1.2.1/go.mod h1:KHLXt3tVN2HBp8eijSv/kGJopbvo7S+qRAEEKiv+SiQ=
|
||||
github.com/vinyldns/go-vinyldns v0.9.16 h1:GZJStDkcCk1F1AcRc64LuuMh+ENL8pHA0CVd4ulRMcQ=
|
||||
|
@ -1243,6 +1243,8 @@ github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f/go.mod h1:N2
|
|||
github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ=
|
||||
github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y=
|
||||
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
|
||||
github.com/xyproto/randomstring v1.0.5 h1:YtlWPoRdgMu3NZtP45drfy1GKoojuR7hmRcnhZqKjWU=
|
||||
github.com/xyproto/randomstring v1.0.5/go.mod h1:rgmS5DeNXLivK7YprL0pY+lTuhNQW3iGxZ18UQApw/E=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20241220122821-aeb3b05efd1c h1:Rnr+lDYXVkP+3eT8/d68iq4G/UeIhyCQk+HKa8toTvg=
|
||||
github.com/yandex-cloud/go-genproto v0.0.0-20241220122821-aeb3b05efd1c/go.mod h1:0LDD/IZLIUIV4iPH+YcF+jysO3jkSvADFGm4dCAuwQo=
|
||||
github.com/yandex-cloud/go-sdk v0.0.0-20241220131134-2393e243c134 h1:qmpz0Kvr9GAng8LAhRcKIpY71CEAcL3EBkftVlsP5Cw=
|
||||
|
|
@ -11,7 +11,6 @@ const (

ServiceName = "serviceName"
MetricsProviderName = "metricsProviderName"
TracingProviderName = "tracingProviderName"
ServerName = "serverName"
ServerIndex = "serverIndex"
TLSStoreName = "tlsStoreName"
ServersTransportName = "serversTransport"

@ -132,7 +132,7 @@ func RegisterOpenTelemetry(ctx context.Context, config *types.OTLP) Registry {

"How many HTTP requests with TLS processed on an entrypoint, partitioned by TLS Version and TLS cipher Used.")
reg.entryPointReqDurationHistogram, _ = NewHistogramWithScale(newOTLPHistogramFrom(meter, entryPointReqDurationName,
"How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.",
"ms"), time.Second)
"s"), time.Second)
reg.entryPointReqsBytesCounter = newOTLPCounterFrom(meter, entryPointReqsBytesTotalName,
"The total size of requests in bytes handled by an entrypoint, partitioned by status code, protocol, and method.")
reg.entryPointRespsBytesCounter = newOTLPCounterFrom(meter, entryPointRespsBytesTotalName,

@ -146,7 +146,7 @@ func RegisterOpenTelemetry(ctx context.Context, config *types.OTLP) Registry {

"How many HTTP requests with TLS are processed on a router, partitioned by service, TLS Version, and TLS cipher Used.")
reg.routerReqDurationHistogram, _ = NewHistogramWithScale(newOTLPHistogramFrom(meter, routerReqDurationName,
"How long it took to process the request on a router, partitioned by service, status code, protocol, and method.",
"ms"), time.Second)
"s"), time.Second)
reg.routerReqsBytesCounter = newOTLPCounterFrom(meter, routerReqsBytesTotalName,
"The total size of requests in bytes handled by a router, partitioned by status code, protocol, and method.")
reg.routerRespsBytesCounter = newOTLPCounterFrom(meter, routerRespsBytesTotalName,

@ -160,7 +160,7 @@ func RegisterOpenTelemetry(ctx context.Context, config *types.OTLP) Registry {

"How many HTTP requests with TLS processed on a service, partitioned by TLS version and TLS cipher.")
reg.serviceReqDurationHistogram, _ = NewHistogramWithScale(newOTLPHistogramFrom(meter, serviceReqDurationName,
"How long it took to process the request on a service, partitioned by status code, protocol, and method.",
"ms"), time.Second)
"s"), time.Second)
reg.serviceRetriesCounter = newOTLPCounterFrom(meter, serviceRetriesTotalName,
"How many request retries happened on a service.")
reg.serviceServerUpGauge = newOTLPGaugeFrom(meter, serviceServerUpName,
@ -376,7 +376,7 @@ func TestOpenTelemetry(t *testing.T) {
|
|||
expectedEntryPoints := []string{
|
||||
`({"name":"traefik_entrypoint_requests_total","description":"How many HTTP requests processed on an entrypoint, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"entrypoint","value":{"stringValue":"test1"}},{"key":"method","value":{"stringValue":"GET"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_entrypoint_requests_tls_total","description":"How many HTTP requests with TLS processed on an entrypoint, partitioned by TLS Version and TLS cipher Used.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"entrypoint","value":{"stringValue":"test2"}},{"key":"tls_cipher","value":{"stringValue":"bar"}},{"key":"tls_version","value":{"stringValue":"foo"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_entrypoint_request_duration_seconds","description":"How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.","unit":"ms","histogram":{"dataPoints":\[{"attributes":\[{"key":"entrypoint","value":{"stringValue":"test3"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_entrypoint_request_duration_seconds","description":"How long it took to process the request on an entrypoint, partitioned by status code, protocol, and method.","unit":"s","histogram":{"dataPoints":\[{"attributes":\[{"key":"entrypoint","value":{"stringValue":"test3"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_entrypoint_requests_bytes_total","description":"The total size of requests in bytes handled by an entrypoint, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"entrypoint","value":{"stringValue":"test1"}},{"key":"method","value":{"stringValue":"GET"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_entrypoint_responses_bytes_total","description":"The total size of responses in bytes handled by an entrypoint, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"entrypoint","value":{"stringValue":"test1"}},{"key":"method","value":{"stringValue":"GET"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
}
|
||||
|
@ -392,7 +392,7 @@ func TestOpenTelemetry(t *testing.T) {
|
|||
expectedRouters := []string{
|
||||
`({"name":"traefik_router_requests_total","description":"How many HTTP requests are processed on a router, partitioned by service, status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"(?:200|404)"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"router","value":{"stringValue":"RouterReqsCounter"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1},{"attributes":\[{"key":"code","value":{"stringValue":"(?:200|404)"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"router","value":{"stringValue":"RouterReqsCounter"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_router_requests_tls_total","description":"How many HTTP requests with TLS are processed on a router, partitioned by service, TLS Version, and TLS cipher Used.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"router","value":{"stringValue":"demo"}},{"key":"service","value":{"stringValue":"test"}},{"key":"tls_cipher","value":{"stringValue":"bar"}},{"key":"tls_version","value":{"stringValue":"foo"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_router_request_duration_seconds","description":"How long it took to process the request on a router, partitioned by service, status code, protocol, and method.","unit":"ms","histogram":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"router","value":{"stringValue":"demo"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_router_request_duration_seconds","description":"How long it took to process the request on a router, partitioned by service, status code, protocol, and method.","unit":"s","histogram":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"router","value":{"stringValue":"demo"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_router_requests_bytes_total","description":"The total size of requests in bytes handled by a router, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"404"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"router","value":{"stringValue":"RouterReqsCounter"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_router_responses_bytes_total","description":"The total size of responses in bytes handled by a router, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"404"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"router","value":{"stringValue":"RouterReqsCounter"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
}
|
||||
|
@ -409,7 +409,7 @@ func TestOpenTelemetry(t *testing.T) {
|
|||
expectedServices := []string{
|
||||
`({"name":"traefik_service_requests_total","description":"How many HTTP requests processed on a service, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"(?:200|404)"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"service","value":{"stringValue":"ServiceReqsCounter"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1},{"attributes":\[{"key":"code","value":{"stringValue":"(?:200|404)"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"service","value":{"stringValue":"ServiceReqsCounter"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_service_requests_tls_total","description":"How many HTTP requests with TLS processed on a service, partitioned by TLS version and TLS cipher.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"service","value":{"stringValue":"test"}},{"key":"tls_cipher","value":{"stringValue":"bar"}},{"key":"tls_version","value":{"stringValue":"foo"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_service_request_duration_seconds","description":"How long it took to process the request on a service, partitioned by status code, protocol, and method.","unit":"ms","histogram":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_service_request_duration_seconds","description":"How long it took to process the request on a service, partitioned by status code, protocol, and method.","unit":"s","histogram":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"200"}},{"key":"service","value":{"stringValue":"test"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","count":"1","sum":10000,"bucketCounts":\["0","0","0","0","0","0","0","0","0","0","0","0","0","0","1"\],"explicitBounds":\[0.005,0.01,0.025,0.05,0.075,0.1,0.25,0.5,0.75,1,2.5,5,7.5,10\],"min":10000,"max":10000}\],"aggregationTemporality":2}})`,
|
||||
`({"name":"traefik_service_server_up","description":"service server is up, described by gauge value of 0 or 1.","unit":"1","gauge":{"dataPoints":\[{"attributes":\[{"key":"service","value":{"stringValue":"test"}},{"key":"url","value":{"stringValue":"http://127.0.0.1"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\]}})`,
|
||||
`({"name":"traefik_service_requests_bytes_total","description":"The total size of requests in bytes received by a service, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"404"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"service","value":{"stringValue":"ServiceReqsCounter"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
`({"name":"traefik_service_responses_bytes_total","description":"The total size of responses in bytes returned by a service, partitioned by status code, protocol, and method.","unit":"1","sum":{"dataPoints":\[{"attributes":\[{"key":"code","value":{"stringValue":"404"}},{"key":"method","value":{"stringValue":"GET"}},{"key":"service","value":{"stringValue":"ServiceReqsCounter"}}\],"startTimeUnixNano":"[\d]{19}","timeUnixNano":"[\d]{19}","asDouble":1}\],"aggregationTemporality":2,"isMonotonic":true}})`,
|
||||
|
|
|
@ -215,34 +215,20 @@ func (c *conn) handleResponse(r rwWithUpgrade) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
hasContentLength := len(res.Header.Peek("Content-Length")) > 0
|
||||
|
||||
if hasContentLength && res.Header.ContentLength() == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// When a body is not allowed for a given status code the body is ignored.
|
||||
// The connection will be marked as broken by the next Peek in the readloop.
|
||||
if !isBodyAllowedForStatus(res.StatusCode()) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if !hasContentLength {
|
||||
b := c.bufferPool.Get()
|
||||
if b == nil {
|
||||
b = make([]byte, bufferSize)
|
||||
}
|
||||
defer c.bufferPool.Put(b)
|
||||
|
||||
if _, err := io.CopyBuffer(r.RW, c.br, b); err != nil {
|
||||
return err
|
||||
}
|
||||
contentLength := res.Header.ContentLength()
|
||||
|
||||
if contentLength == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Chunked response, Content-Length is set to -1 by FastProxy when "Transfer-Encoding: chunked" header is received.
|
||||
if res.Header.ContentLength() == -1 {
|
||||
if contentLength == -1 {
|
||||
cbr := httputil.NewChunkedReader(c.br)
|
||||
|
||||
b := c.bufferPool.Get()
|
||||
|
@ -282,6 +268,23 @@ func (c *conn) handleResponse(r rwWithUpgrade) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// Response without Content-Length header.
|
||||
// The message body length is determined by the number of bytes received prior to the server closing the connection.
|
||||
if contentLength == -2 {
|
||||
b := c.bufferPool.Get()
|
||||
if b == nil {
|
||||
b = make([]byte, bufferSize)
|
||||
}
|
||||
defer c.bufferPool.Put(b)
|
||||
|
||||
if _, err := io.CopyBuffer(r.RW, c.br, b); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Response with a valid Content-Length header.
|
||||
brl := c.limitedReaderPool.Get()
|
||||
if brl == nil {
|
||||
brl = &io.LimitedReader{}
|
||||
|
|
|
@ -171,6 +171,7 @@ func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
|
|||
if reqUpType != "" {
|
||||
outReq.Header.Set("Connection", "Upgrade")
|
||||
outReq.Header.Set("Upgrade", reqUpType)
|
||||
|
||||
if strings.EqualFold(reqUpType, "websocket") {
|
||||
cleanWebSocketHeaders(&outReq.Header)
|
||||
}
|
||||
|
@ -351,7 +352,7 @@ func isGraphic(s string) bool {
|
|||
type fasthttpHeader interface {
|
||||
Peek(key string) []byte
|
||||
Set(key string, value string)
|
||||
SetBytesV(key string, value []byte)
|
||||
SetCanonical(key []byte, value []byte)
|
||||
DelBytes(key []byte)
|
||||
Del(key string)
|
||||
ConnectionUpgrade() bool
|
||||
|
@ -382,18 +383,33 @@ func fixPragmaCacheControl(header fasthttpHeader) {
|
|||
// Sec-WebSocket-Protocol and Sec-WebSocket-Version to be case-sensitive.
|
||||
// https://tools.ietf.org/html/rfc6455#page-20
|
||||
func cleanWebSocketHeaders(headers fasthttpHeader) {
|
||||
headers.SetBytesV("Sec-WebSocket-Key", headers.Peek("Sec-Websocket-Key"))
|
||||
headers.Del("Sec-Websocket-Key")
|
||||
secWebsocketKey := headers.Peek("Sec-Websocket-Key")
|
||||
if len(secWebsocketKey) > 0 {
|
||||
headers.SetCanonical([]byte("Sec-WebSocket-Key"), secWebsocketKey)
|
||||
headers.Del("Sec-Websocket-Key")
|
||||
}
|
||||
|
||||
headers.SetBytesV("Sec-WebSocket-Extensions", headers.Peek("Sec-Websocket-Extensions"))
|
||||
headers.Del("Sec-Websocket-Extensions")
|
||||
secWebsocketExtensions := headers.Peek("Sec-Websocket-Extensions")
|
||||
if len(secWebsocketExtensions) > 0 {
|
||||
headers.SetCanonical([]byte("Sec-WebSocket-Extensions"), secWebsocketExtensions)
|
||||
headers.Del("Sec-Websocket-Extensions")
|
||||
}
|
||||
|
||||
headers.SetBytesV("Sec-WebSocket-Accept", headers.Peek("Sec-Websocket-Accept"))
|
||||
headers.Del("Sec-Websocket-Accept")
|
||||
secWebsocketAccept := headers.Peek("Sec-Websocket-Accept")
|
||||
if len(secWebsocketAccept) > 0 {
|
||||
headers.SetCanonical([]byte("Sec-WebSocket-Accept"), secWebsocketAccept)
|
||||
headers.Del("Sec-Websocket-Accept")
|
||||
}
|
||||
|
||||
headers.SetBytesV("Sec-WebSocket-Protocol", headers.Peek("Sec-Websocket-Protocol"))
|
||||
headers.Del("Sec-Websocket-Protocol")
|
||||
secWebsocketProtocol := headers.Peek("Sec-Websocket-Protocol")
|
||||
if len(secWebsocketProtocol) > 0 {
|
||||
headers.SetCanonical([]byte("Sec-WebSocket-Protocol"), secWebsocketProtocol)
|
||||
headers.Del("Sec-Websocket-Protocol")
|
||||
}
|
||||
|
||||
headers.SetBytesV("Sec-WebSocket-Version", headers.Peek("Sec-Websocket-Version"))
|
||||
headers.DelBytes([]byte("Sec-Websocket-Version"))
|
||||
secWebsocketVersion := headers.Peek("Sec-Websocket-Version")
|
||||
if len(secWebsocketVersion) > 0 {
|
||||
headers.SetCanonical([]byte("Sec-WebSocket-Version"), secWebsocketVersion)
|
||||
headers.Del("Sec-Websocket-Version")
|
||||
}
|
||||
}
|
||||
|
|
|
@ -306,7 +306,7 @@ func TestHeadRequest(t *testing.T) {
|
|||
assert.Equal(t, http.StatusOK, res.Code)
|
||||
}
|
||||
|
||||
func TestNoContentLengthResponse(t *testing.T) {
|
||||
func TestNoContentLength(t *testing.T) {
|
||||
backendListener, err := net.Listen("tcp", ":0")
|
||||
require.NoError(t, err)
|
||||
|
||||
|
@ -346,6 +346,45 @@ func TestNoContentLengthResponse(t *testing.T) {
|
|||
assert.Equal(t, "foo", res.Body.String())
|
||||
}
|
||||
|
||||
func TestTransferEncodingChunked(t *testing.T) {
|
||||
backendServer := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
|
||||
flusher, ok := rw.(http.Flusher)
|
||||
require.True(t, ok)
|
||||
|
||||
for i := range 3 {
|
||||
_, err := rw.Write([]byte(fmt.Sprintf("chunk %d\n", i)))
|
||||
require.NoError(t, err)
|
||||
|
||||
flusher.Flush()
|
||||
}
|
||||
}))
|
||||
t.Cleanup(backendServer.Close)
|
||||
|
||||
builder := NewProxyBuilder(&transportManagerMock{}, static.FastProxyConfig{})
|
||||
|
||||
proxyHandler, err := builder.Build("", testhelpers.MustParseURL(backendServer.URL), true, true)
|
||||
require.NoError(t, err)
|
||||
|
||||
proxyServer := httptest.NewServer(proxyHandler)
|
||||
t.Cleanup(proxyServer.Close)
|
||||
|
||||
req, err := http.NewRequest(http.MethodGet, proxyServer.URL, http.NoBody)
|
||||
require.NoError(t, err)
|
||||
|
||||
res, err := http.DefaultClient.Do(req)
|
||||
require.NoError(t, err)
|
||||
|
||||
t.Cleanup(func() { _ = res.Body.Close() })
|
||||
|
||||
assert.Equal(t, http.StatusOK, res.StatusCode)
|
||||
assert.Equal(t, []string{"chunked"}, res.TransferEncoding)
|
||||
|
||||
body, err := io.ReadAll(res.Body)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, "chunk 0\nchunk 1\nchunk 2\n", string(body))
|
||||
}
|
||||
|
||||
func newCertificate(t *testing.T, domain string) *tls.Certificate {
|
||||
t.Helper()
|
||||
|
||||
|
|
|
@@ -18,9 +18,12 @@ import (
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "github.com/traefik/traefik/v3/pkg/testhelpers"
    "github.com/valyala/fasthttp"
    "golang.org/x/net/websocket"
)

const dialTimeout = time.Second

func TestWebSocketUpgradeCase(t *testing.T) {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        challengeKey := r.Header.Get("Sec-Websocket-Key")

@@ -49,6 +52,31 @@ func TestWebSocketUpgradeCase(t *testing.T) {
    conn.Close()
}

func TestCleanWebSocketHeaders(t *testing.T) {
    // Asserts that no headers are sent if the request contain anything.
    req := fasthttp.AcquireRequest()
    defer fasthttp.ReleaseRequest(req)

    cleanWebSocketHeaders(&req.Header)

    want := "GET / HTTP/1.1\r\n\r\n"
    assert.Equal(t, want, req.Header.String())

    // Asserts that the Sec-WebSocket-* is enforced.
    req.Reset()

    req.Header.Set("Sec-Websocket-Key", "key")
    req.Header.Set("Sec-Websocket-Extensions", "extensions")
    req.Header.Set("Sec-Websocket-Accept", "accept")
    req.Header.Set("Sec-Websocket-Protocol", "protocol")
    req.Header.Set("Sec-Websocket-Version", "version")

    cleanWebSocketHeaders(&req.Header)

    want = "GET / HTTP/1.1\r\nSec-WebSocket-Key: key\r\nSec-WebSocket-Extensions: extensions\r\nSec-WebSocket-Accept: accept\r\nSec-WebSocket-Protocol: protocol\r\nSec-WebSocket-Version: version\r\n\r\n"
    assert.Equal(t, want, req.Header.String())
}

func TestWebSocketTCPClose(t *testing.T) {
    errChan := make(chan error, 1)
    upgrader := gorillawebsocket.Upgrader{}

@@ -535,29 +563,6 @@ func TestForwardsWebsocketTraffic(t *testing.T) {
    assert.Equal(t, "ok", resp)
}

func createTLSWebsocketServer() *httptest.Server {
    upgrader := gorillawebsocket.Upgrader{}
    srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        conn, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
            return
        }
        defer conn.Close()
        for {
            mt, message, err := conn.ReadMessage()
            if err != nil {
                break
            }

            err = conn.WriteMessage(mt, message)
            if err != nil {
                break
            }
        }
    }))
    return srv
}

func TestWebSocketTransferTLSConfig(t *testing.T) {
    srv := createTLSWebsocketServer()
    defer srv.Close()

@@ -592,7 +597,28 @@ func TestWebSocketTransferTLSConfig(t *testing.T) {
    assert.Equal(t, "ok", resp)
}

const dialTimeout = time.Second
func createTLSWebsocketServer() *httptest.Server {
    upgrader := gorillawebsocket.Upgrader{}
    srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        conn, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
            return
        }
        defer conn.Close()
        for {
            mt, message, err := conn.ReadMessage()
            if err != nil {
                break
            }

            err = conn.WriteMessage(mt, message)
            if err != nil {
                break
            }
        }
    }))
    return srv
}

type websocketRequestOpt func(w *websocketRequest)
@@ -70,7 +70,9 @@ func directorBuilder(target *url.URL, passHostHeader bool, preservePath bool) fu
            outReq.Host = outReq.URL.Host
        }

        cleanWebSocketHeaders(outReq)
        if isWebSocketUpgrade(outReq) {
            cleanWebSocketHeaders(outReq)
        }
    }
}

@@ -79,10 +81,6 @@ func directorBuilder(target *url.URL, passHostHeader bool, preservePath bool) fu
// Sec-WebSocket-Protocol and Sec-WebSocket-Version to be case-sensitive.
// https://tools.ietf.org/html/rfc6455#page-20
func cleanWebSocketHeaders(req *http.Request) {
    if !isWebSocketUpgrade(req) {
        return
    }

    req.Header["Sec-WebSocket-Key"] = req.Header["Sec-Websocket-Key"]
    delete(req.Header, "Sec-Websocket-Key")
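For context on the `cleanWebSocketHeaders` change above: Go's `net/http` canonicalizes header keys, turning `Sec-WebSocket-Key` into `Sec-Websocket-Key`, while some WebSocket backends expect the RFC 6455 mixed casing; the helper therefore rewrites the header map keys directly, and after this change it only runs for requests that are actually WebSocket upgrades. A minimal standalone sketch (not part of the commit, standard library only) illustrating the canonicalization behavior it works around:

```go
package main

import (
	"fmt"
	"net/http"
)

func main() {
	// net/http canonicalizes "Sec-WebSocket-Key" to "Sec-Websocket-Key".
	fmt.Println(http.CanonicalHeaderKey("Sec-WebSocket-Key")) // Sec-Websocket-Key

	h := http.Header{}
	h.Set("Sec-WebSocket-Key", "key") // stored under the canonical key
	fmt.Println(h)                    // map[Sec-Websocket-Key:[key]]

	// Writing the map entry directly preserves the mixed-case key, which is
	// the same trick cleanWebSocketHeaders applies to each Sec-WebSocket-* header.
	h["Sec-WebSocket-Key"] = h["Sec-Websocket-Key"]
	delete(h, "Sec-Websocket-Key")
	fmt.Println(h) // map[Sec-WebSocket-Key:[key]]
}
```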
@@ -2,6 +2,7 @@ package httputil

import (
    "bufio"
    "bytes"
    "crypto/tls"
    "errors"
    "fmt"

@@ -18,6 +19,8 @@ import (
    "golang.org/x/net/websocket"
)

const dialTimeout = time.Second

func TestWebSocketTCPClose(t *testing.T) {
    errChan := make(chan error, 1)
    upgrader := gorillawebsocket.Upgrader{}

@@ -419,28 +422,6 @@ func TestForwardsWebsocketTraffic(t *testing.T) {
    assert.Equal(t, "ok", resp)
}

func createTLSWebsocketServer() *httptest.Server {
    upgrader := gorillawebsocket.Upgrader{}
    srv := httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        conn, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
            return
        }
        defer conn.Close()
        for {
            mt, message, err := conn.ReadMessage()
            if err != nil {
                break
            }
            err = conn.WriteMessage(mt, message)
            if err != nil {
                break
            }
        }
    }))
    return srv
}

func TestWebSocketTransferTLSConfig(t *testing.T) {
    srv := createTLSWebsocketServer()
    defer srv.Close()

@@ -495,7 +476,58 @@ func TestWebSocketTransferTLSConfig(t *testing.T) {
    assert.Equal(t, "ok", resp)
}

const dialTimeout = time.Second
func TestCleanWebSocketHeaders(t *testing.T) {
    // Asserts that no headers are sent if the request contain anything.
    req := httptest.NewRequest(http.MethodGet, "/", http.NoBody)
    req.Header.Del("User-Agent")

    cleanWebSocketHeaders(req)

    b := bytes.NewBuffer(nil)
    err := req.Header.Write(b)
    require.NoError(t, err)

    assert.Empty(t, b)

    // Asserts that the Sec-WebSocket-* is enforced.
    req.Header.Set("Sec-Websocket-Key", "key")
    req.Header.Set("Sec-Websocket-Extensions", "extensions")
    req.Header.Set("Sec-Websocket-Accept", "accept")
    req.Header.Set("Sec-Websocket-Protocol", "protocol")
    req.Header.Set("Sec-Websocket-Version", "version")

    cleanWebSocketHeaders(req)

    want := http.Header{
        "Sec-WebSocket-Key":        {"key"},
        "Sec-WebSocket-Extensions": {"extensions"},
        "Sec-WebSocket-Accept":     {"accept"},
        "Sec-WebSocket-Protocol":   {"protocol"},
        "Sec-WebSocket-Version":    {"version"},
    }
    assert.Equal(t, want, req.Header)
}

func createTLSWebsocketServer() *httptest.Server {
    var upgrader gorillawebsocket.Upgrader
    return httptest.NewTLSServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        conn, err := upgrader.Upgrade(w, r, nil)
        if err != nil {
            return
        }
        defer conn.Close()
        for {
            mt, message, err := conn.ReadMessage()
            if err != nil {
                break
            }
            err = conn.WriteMessage(mt, message)
            if err != nil {
                break
            }
        }
    }))
}

type websocketRequestOpt func(w *websocketRequest)
@@ -3,6 +3,8 @@ package wrr
import (
    "container/heap"
    "context"
    "crypto/sha256"
    "encoding/hex"
    "errors"
    "hash/fnv"
    "net/http"

@@ -15,9 +17,10 @@ import (

type namedHandler struct {
    http.Handler
    name     string
    weight   float64
    deadline float64
    name       string
    hashedName string
    weight     float64
    deadline   float64
}

type stickyCookie struct {

@@ -53,9 +56,10 @@ type Balancer struct {

    handlersMu sync.RWMutex
    // References all the handlers by name and also by the hashed value of the name.
    handlerMap  map[string]*namedHandler
    handlers    []*namedHandler
    curDeadline float64
    stickyMap              map[string]*namedHandler
    compatibilityStickyMap map[string]*namedHandler
    handlers               []*namedHandler
    curDeadline            float64
    // status is a record of which child services of the Balancer are healthy, keyed
    // by name of child service. A service is initially added to the map when it is
    // created via Add, and it is later removed or added to the map as needed,

@@ -73,7 +77,6 @@ func New(sticky *dynamic.Sticky, wantHealthCheck bool) *Balancer {
    balancer := &Balancer{
        status:           make(map[string]struct{}),
        fenced:           make(map[string]struct{}),
        handlerMap:       make(map[string]*namedHandler),
        wantsHealthCheck: wantHealthCheck,
    }
    if sticky != nil && sticky.Cookie != nil {

@@ -88,6 +91,9 @@ func New(sticky *dynamic.Sticky, wantHealthCheck bool) *Balancer {
        if sticky.Cookie.Path != nil {
            balancer.stickyCookie.path = *sticky.Cookie.Path
        }

        balancer.stickyMap = make(map[string]*namedHandler)
        balancer.compatibilityStickyMap = make(map[string]*namedHandler)
    }

    return balancer

@@ -218,7 +224,7 @@ func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {

        if err == nil && cookie != nil {
            b.handlersMu.RLock()
            handler, ok := b.handlerMap[cookie.Value]
            handler, ok := b.stickyMap[cookie.Value]
            b.handlersMu.RUnlock()

            if ok && handler != nil {

@@ -230,6 +236,22 @@ func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
                    return
                }
            }

            b.handlersMu.RLock()
            handler, ok = b.compatibilityStickyMap[cookie.Value]
            b.handlersMu.RUnlock()

            if ok && handler != nil {
                b.handlersMu.RLock()
                _, isHealthy := b.status[handler.name]
                b.handlersMu.RUnlock()
                if isHealthy {
                    b.writeStickyCookie(w, handler)

                    handler.ServeHTTP(w, req)
                    return
                }
            }
        }
    }

@@ -244,21 +266,25 @@ func (b *Balancer) ServeHTTP(w http.ResponseWriter, req *http.Request) {
    }

    if b.stickyCookie != nil {
        cookie := &http.Cookie{
            Name:     b.stickyCookie.name,
            Value:    hash(server.name),
            Path:     b.stickyCookie.path,
            HttpOnly: b.stickyCookie.httpOnly,
            Secure:   b.stickyCookie.secure,
            SameSite: convertSameSite(b.stickyCookie.sameSite),
            MaxAge:   b.stickyCookie.maxAge,
        }
        http.SetCookie(w, cookie)
        b.writeStickyCookie(w, server)
    }

    server.ServeHTTP(w, req)
}

func (b *Balancer) writeStickyCookie(w http.ResponseWriter, handler *namedHandler) {
    cookie := &http.Cookie{
        Name:     b.stickyCookie.name,
        Value:    handler.hashedName,
        Path:     b.stickyCookie.path,
        HttpOnly: b.stickyCookie.httpOnly,
        Secure:   b.stickyCookie.secure,
        SameSite: convertSameSite(b.stickyCookie.sameSite),
        MaxAge:   b.stickyCookie.maxAge,
    }
    http.SetCookie(w, cookie)
}

// Add adds a handler.
// A handler with a non-positive weight is ignored.
func (b *Balancer) Add(name string, handler http.Handler, weight *int, fenced bool) {

@@ -280,15 +306,41 @@ func (b *Balancer) Add(name string, handler http.Handler, weight *int, fenced bo
    if fenced {
        b.fenced[name] = struct{}{}
    }
    b.handlerMap[name] = h
    b.handlerMap[hash(name)] = h

    if b.stickyCookie != nil {
        sha256HashedName := sha256Hash(name)
        h.hashedName = sha256HashedName

        b.stickyMap[sha256HashedName] = h
        b.compatibilityStickyMap[name] = h

        hashedName := fnvHash(name)
        b.compatibilityStickyMap[hashedName] = h

        // server.URL was fnv hashed in service.Manager
        // so we can have "double" fnv hash in already existing cookies
        hashedName = fnvHash(hashedName)
        b.compatibilityStickyMap[hashedName] = h
    }
    b.handlersMu.Unlock()
}

func hash(input string) string {
func fnvHash(input string) string {
    hasher := fnv.New64()
    // We purposely ignore the error because the implementation always returns nil.
    _, _ = hasher.Write([]byte(input))

    return strconv.FormatUint(hasher.Sum64(), 16)
}

func sha256Hash(input string) string {
    hash := sha256.New()
    // We purposely ignore the error because the implementation always returns nil.
    _, _ = hash.Write([]byte(input))

    hashedInput := hex.EncodeToString(hash.Sum(nil))
    if len(hashedInput) < 16 {
        return hashedInput
    }
    return hashedInput[:16]
}
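A quick aside on the sticky-cookie compatibility logic introduced above: new cookies carry a truncated SHA-256 hash of the server name, while the compatibility map still resolves values that older Traefik versions may have issued, namely the raw name, its FNV-64 hash, or a double FNV-64 hash (the service manager used to FNV-hash server URLs before the balancer hashed the name again). A minimal standalone sketch (standard library only, not part of the commit; the backend URL is a hypothetical example) showing how those legacy cookie values all resolve to the same backend:

```go
package main

import (
	"crypto/sha256"
	"encoding/hex"
	"fmt"
	"hash/fnv"
	"strconv"
)

// fnvHash mirrors the legacy cookie value: an FNV-64 hash rendered as hex.
func fnvHash(input string) string {
	hasher := fnv.New64()
	_, _ = hasher.Write([]byte(input))
	return strconv.FormatUint(hasher.Sum64(), 16)
}

// sha256Hash mirrors the new cookie value: a SHA-256 hash truncated to 16 hex characters.
func sha256Hash(input string) string {
	sum := sha256.Sum256([]byte(input))
	return hex.EncodeToString(sum[:])[:16]
}

func main() {
	server := "http://10.0.0.1:8080" // hypothetical backend URL used as the handler name

	// Values an incoming cookie could carry, depending on which version set it.
	legacy := []string{
		server,                   // raw name
		fnvHash(server),          // single FNV hash
		fnvHash(fnvHash(server)), // double FNV hash (URL hashed in the manager, then in the balancer)
	}

	// A compatibility lookup table in the spirit of compatibilityStickyMap:
	// every legacy value resolves to the same backend, and the response is
	// re-stamped with the new SHA-256-based value.
	compat := map[string]string{}
	for _, v := range legacy {
		compat[v] = server
	}

	for _, cookieValue := range legacy {
		fmt.Printf("cookie %q -> backend %q, new cookie value %q\n",
			cookieValue, compat[cookieValue], sha256Hash(compat[cookieValue]))
	}
}
```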
@@ -296,7 +296,7 @@ func TestSticky_FallBack(t *testing.T) {
        rw.WriteHeader(http.StatusOK)
    }), pointer(2), false)

    recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
    recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}, cookies: make(map[string]*http.Cookie)}

    req := httptest.NewRequest(http.MethodGet, "/", nil)
    req.AddCookie(&http.Cookie{Name: "test", Value: "second"})

@@ -373,7 +373,7 @@ func TestSticky_Fenced(t *testing.T) {
        rw.WriteHeader(http.StatusOK)
    }), pointer(1), true)

    recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}}
    recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}, cookies: make(map[string]*http.Cookie)}

    stickyReq := httptest.NewRequest(http.MethodGet, "/", nil)
    stickyReq.AddCookie(&http.Cookie{Name: "test", Value: "fenced"})

@@ -391,3 +391,99 @@
    assert.Equal(t, 2, recorder.save["first"])
    assert.Equal(t, 2, recorder.save["second"])
}

func TestStickyWithCompatibility(t *testing.T) {
    testCases := []struct {
        desc    string
        servers []string
        cookies []*http.Cookie

        expectedCookies []*http.Cookie
        expectedServer  string
    }{
        {
            desc:    "No previous cookie",
            servers: []string{"first"},

            expectedServer: "first",
            expectedCookies: []*http.Cookie{
                {Name: "test", Value: sha256Hash("first")},
            },
        },
        {
            desc:    "Sha256 previous cookie",
            servers: []string{"first", "second"},
            cookies: []*http.Cookie{
                {Name: "test", Value: sha256Hash("first")},
            },
            expectedServer:  "first",
            expectedCookies: []*http.Cookie{},
        },
        {
            desc:    "Raw previous cookie",
            servers: []string{"first", "second"},
            cookies: []*http.Cookie{
                {Name: "test", Value: "first"},
            },
            expectedServer: "first",
            expectedCookies: []*http.Cookie{
                {Name: "test", Value: sha256Hash("first")},
            },
        },
        {
            desc:    "Fnv previous cookie",
            servers: []string{"first", "second"},
            cookies: []*http.Cookie{
                {Name: "test", Value: fnvHash("first")},
            },
            expectedServer: "first",
            expectedCookies: []*http.Cookie{
                {Name: "test", Value: sha256Hash("first")},
            },
        },
        {
            desc:    "Double fnv previous cookie",
            servers: []string{"first", "second"},
            cookies: []*http.Cookie{
                {Name: "test", Value: fnvHash(fnvHash("first"))},
            },
            expectedServer: "first",
            expectedCookies: []*http.Cookie{
                {Name: "test", Value: sha256Hash("first")},
            },
        },
    }

    for _, test := range testCases {
        t.Run(test.desc, func(t *testing.T) {
            t.Parallel()

            balancer := New(&dynamic.Sticky{Cookie: &dynamic.Cookie{Name: "test"}}, false)

            for _, server := range test.servers {
                balancer.Add(server, http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
                    rw.WriteHeader(http.StatusOK)
                    _, _ = rw.Write([]byte(server))
                }), pointer(1), false)
            }

            // Do it twice, to be sure it's not just the luck.
            for range 2 {
                req := httptest.NewRequest(http.MethodGet, "/", nil)
                for _, cookie := range test.cookies {
                    req.AddCookie(cookie)
                }

                recorder := &responseRecorder{ResponseRecorder: httptest.NewRecorder(), save: map[string]int{}, cookies: make(map[string]*http.Cookie)}
                balancer.ServeHTTP(recorder, req)

                assert.Equal(t, test.expectedServer, recorder.Body.String())

                assert.Len(t, recorder.cookies, len(test.expectedCookies))
                for _, cookie := range test.expectedCookies {
                    assert.Equal(t, cookie.Value, recorder.cookies[cookie.Name].Value)
                }
            }
        })
    }
}
@@ -2,11 +2,9 @@ package service

import (
    "context"
    "encoding/hex"
    "encoding/json"
    "errors"
    "fmt"
    "hash/fnv"
    "math/rand"
    "net/http"
    "net/url"

@@ -335,18 +333,13 @@ func (m *Manager) getLoadBalancerServiceHandler(ctx context.Context, serviceName
    lb := wrr.New(service.Sticky, service.HealthCheck != nil)
    healthCheckTargets := make(map[string]*url.URL)

    for _, server := range shuffle(service.Servers, m.rand) {
        hasher := fnv.New64a()
        _, _ = hasher.Write([]byte(server.URL)) // this will never return an error.

        proxyName := hex.EncodeToString(hasher.Sum(nil))

    for i, server := range shuffle(service.Servers, m.rand) {
        target, err := url.Parse(server.URL)
        if err != nil {
            return nil, fmt.Errorf("error parsing server URL %s: %w", server.URL, err)
        }

        logger.Debug().Str(logs.ServerName, proxyName).Stringer("target", target).
        logger.Debug().Int(logs.ServerIndex, i).Str("URL", server.URL).
            Msg("Creating server")

        qualifiedSvcName := provider.GetQualifiedName(ctx, serviceName)

@@ -392,12 +385,12 @@ func (m *Manager) getLoadBalancerServiceHandler(ctx context.Context, serviceName
            proxy, _ = capture.Wrap(proxy)
        }

        lb.Add(proxyName, proxy, server.Weight, server.Fenced)
        lb.Add(server.URL, proxy, server.Weight, server.Fenced)

        // servers are considered UP by default.
        info.UpdateServerStatus(target.String(), runtime.StatusUp)

        healthCheckTargets[proxyName] = target
        healthCheckTargets[server.URL] = target
    }

    if service.HealthCheck != nil {
@@ -25,7 +25,7 @@ import (

// Backend is an abstraction for tracking backend (OpenTelemetry, ...).
type Backend interface {
    Setup(serviceName string, sampleRate float64, globalAttributes map[string]string) (trace.Tracer, io.Closer, error)
    Setup(serviceName string, sampleRate float64, resourceAttributes map[string]string) (trace.Tracer, io.Closer, error)
}

// NewTracing Creates a Tracing.

@@ -44,7 +44,7 @@ func NewTracing(conf *static.Tracing) (*Tracer, io.Closer, error) {

    otel.SetTextMapPropagator(autoprop.NewTextMapPropagator())

    tr, closer, err := backend.Setup(conf.ServiceName, conf.SampleRate, conf.GlobalAttributes)
    tr, closer, err := backend.Setup(conf.ServiceName, conf.SampleRate, conf.ResourceAttributes)
    if err != nil {
        return nil, nil, err
    }
@@ -73,6 +73,7 @@ func TestTracing(t *testing.T) {
        desc                 string
        propagators          string
        headers              map[string]string
        resourceAttributes   map[string]string
        wantServiceHeadersFn func(t *testing.T, headers http.Header)
        assertFn             func(*testing.T, string)
    }{

@@ -85,6 +86,17 @@ func TestTracing(t *testing.T) {
                assert.Regexp(t, `({"key":"service.version","value":{"stringValue":"dev"}})`, trace)
            },
        },
        {
            desc: "resource attributes must be propagated",
            resourceAttributes: map[string]string{
                "service.environment": "custom",
            },
            assertFn: func(t *testing.T, trace string) {
                t.Helper()

                assert.Regexp(t, `({"key":"service.environment","value":{"stringValue":"custom"}})`, trace)
            },
        },
        {
            desc:        "TraceContext propagation",
            propagators: "tracecontext",

@@ -328,8 +340,9 @@ func TestTracing(t *testing.T) {
            })

            tracingConfig := &static.Tracing{
                ServiceName: "traefik",
                SampleRate:  1.0,
                ServiceName:        "traefik",
                SampleRate:         1.0,
                ResourceAttributes: test.resourceAttributes,
                OTLP: &types.OTelTracing{
                    HTTP: &types.OTelHTTP{
                        Endpoint: collector.URL,
@@ -36,7 +36,7 @@ func (c *OTelTracing) SetDefaults() {
}

// Setup sets up the tracer.
func (c *OTelTracing) Setup(serviceName string, sampleRate float64, globalAttributes map[string]string) (trace.Tracer, io.Closer, error) {
func (c *OTelTracing) Setup(serviceName string, sampleRate float64, resourceAttributes map[string]string) (trace.Tracer, io.Closer, error) {
    var (
        err      error
        exporter *otlptrace.Exporter

@@ -55,7 +55,7 @@ func (c *OTelTracing) Setup(serviceName string, sampleRate float64, globalAttrib
        semconv.ServiceVersionKey.String(version.Version),
    }

    for k, v := range globalAttributes {
    for k, v := range resourceAttributes {
        attr = append(attr, attribute.String(k, v))
    }
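To make the `globalAttributes` to `resourceAttributes` rename above concrete: the configured values end up as OpenTelemetry resource attributes attached to every exported span, next to the standard `service.name` and `service.version` entries. A rough, standalone sketch of that flow using the OTel Go SDK; the `buildResource` helper and the semconv module version are illustrative assumptions, not Traefik's API:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.26.0" // assumed semconv version
)

// buildResource is an illustrative helper: it folds user-supplied resource
// attributes into an OTel resource alongside the standard service attributes,
// mirroring what Setup does with its resourceAttributes parameter.
func buildResource(serviceName, serviceVersion string, resourceAttributes map[string]string) *resource.Resource {
	attrs := []attribute.KeyValue{
		semconv.ServiceNameKey.String(serviceName),
		semconv.ServiceVersionKey.String(serviceVersion),
	}
	for k, v := range resourceAttributes {
		attrs = append(attrs, attribute.String(k, v))
	}

	return resource.NewWithAttributes(semconv.SchemaURL, attrs...)
}

func main() {
	res := buildResource("traefik", "dev", map[string]string{
		"service.environment": "custom", // matches the value asserted in the tracing test above
	})

	for _, kv := range res.Attributes() {
		fmt.Printf("%s=%s\n", kv.Key, kv.Value.AsString())
	}
}
```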