chore: update linter.

parent ec0d03658d
commit 2e7833df49

21 changed files with 179 additions and 140 deletions
@@ -40,7 +40,7 @@ func Do(baseConfig interface{}, indent bool) (string, error) {
 }
 
 func doOnJSON(input string) string {
-	mailExp := regexp.MustCompile(`\w[-._\w]*\w@\w[-._\w]*\w\.\w{2,3}"`)
+	mailExp := regexp.MustCompile(`\w[-.\w]*\w@\w[-.\w]*\w\.\w{2,3}"`)
 	return xurls.Relaxed().ReplaceAllString(mailExp.ReplaceAllString(input, maskLarge+"\""), maskLarge)
 }
 
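The only change in this hunk drops the redundant `_` from the character classes: in Go regexp syntax, \w already matches [0-9A-Za-z_], so [-._\w] and [-.\w] accept exactly the same strings. A standalone sketch (hypothetical, not part of this commit) that checks the equivalence:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// \w covers [0-9A-Za-z_], so adding `_` to the class is redundant.
	before := regexp.MustCompile(`\w[-._\w]*\w`)
	after := regexp.MustCompile(`\w[-.\w]*\w`)

	for _, s := range []string{"foo_bar", "a.b-c", "x_", "user_name.tag"} {
		fmt.Printf("%-15s before=%v after=%v\n", s, before.MatchString(s), after.MatchString(s))
	}
}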
@@ -79,7 +79,7 @@ func loadConfigFiles(configFile string, element interface{}) (string, error) {
 		return "", nil
 	}
 
-	if err = file.Decode(filePath, element); err != nil {
+	if err := file.Decode(filePath, element); err != nil {
 		return "", err
 	}
 	return filePath, nil
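Replacing `err =` with `err :=` inside the if statement scopes the error to that statement, so it no longer reuses (or clobbers) an err declared earlier in the function; the same fix appears again in the tcpKeepAliveListener hunk further down. A minimal sketch of the idiom, with hypothetical names standing in for file.Decode:

package main

import (
	"errors"
	"fmt"
)

// decode stands in for file.Decode here (hypothetical helper).
func decode(ok bool) error {
	if !ok {
		return errors.New("decode failed")
	}
	return nil
}

func main() {
	// err lives only inside this if statement; nothing outside the
	// statement can accidentally read or overwrite it.
	if err := decode(false); err != nil {
		fmt.Println("handled:", err)
	}
}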
@@ -24,9 +24,21 @@ func TestJobBackOff(t *testing.T) {
 	exp.MinJobInterval = testMinJobInterval
 	exp.Reset()
 
-	expectedResults := []time.Duration{500, 500, 500, 1000, 2000, 4000, 5000, 5000, 500, 1000, 2000, 4000, 5000, 5000}
-	for i, d := range expectedResults {
-		expectedResults[i] = d * time.Millisecond
+	expectedResults := []time.Duration{
+		500 * time.Millisecond,
+		500 * time.Millisecond,
+		500 * time.Millisecond,
+		1 * time.Second,
+		2 * time.Second,
+		4 * time.Second,
+		5 * time.Second,
+		5 * time.Second,
+		500 * time.Millisecond,
+		1 * time.Second,
+		2 * time.Second,
+		4 * time.Second,
+		5 * time.Second,
+		5 * time.Second,
 	}
 
 	for i, expected := range expectedResults {
@@ -390,7 +390,7 @@ func newCollector(metricName string, labels stdprometheus.Labels, c stdprometheu
 
 // collector wraps a Collector object from the Prometheus client library.
 // It adds information on how many generations this metric should be present
-// in the /metrics output, relatived to the time it was last tracked.
+// in the /metrics output, relative to the time it was last tracked.
 type collector struct {
 	id     string
 	labels stdprometheus.Labels
@@ -711,10 +711,10 @@ func assertValidLogData(t *testing.T, expected string, logData []byte) {
 	assert.Equal(t, resultExpected[OriginContentSize], result[OriginContentSize], formatErrMessage)
 	assert.Equal(t, resultExpected[RequestRefererHeader], result[RequestRefererHeader], formatErrMessage)
 	assert.Equal(t, resultExpected[RequestUserAgentHeader], result[RequestUserAgentHeader], formatErrMessage)
-	assert.Regexp(t, regexp.MustCompile("[0-9]*"), result[RequestCount], formatErrMessage)
+	assert.Regexp(t, regexp.MustCompile(`\d*`), result[RequestCount], formatErrMessage)
 	assert.Equal(t, resultExpected[RouterName], result[RouterName], formatErrMessage)
 	assert.Equal(t, resultExpected[ServiceURL], result[ServiceURL], formatErrMessage)
-	assert.Regexp(t, regexp.MustCompile("[0-9]*ms"), result[Duration], formatErrMessage)
+	assert.Regexp(t, regexp.MustCompile(`\d*ms`), result[Duration], formatErrMessage)
 }
 
 func captureStdout(t *testing.T) (out *os.File, restoreStdout func()) {
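Both replacements swap the quoted pattern "[0-9]*" for the raw-string pattern `\d*`: \d is shorthand for [0-9], and a backquoted raw string keeps backslashes literal without double-escaping. A quick standalone check (not part of this commit):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// \d is shorthand for [0-9]; the raw string avoids escaping the backslash.
	quoted := regexp.MustCompile("[0-9]*ms")
	raw := regexp.MustCompile(`\d*ms`)
	fmt.Println(quoted.MatchString("15ms"), raw.MatchString("15ms")) // true true
}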
@@ -92,39 +92,42 @@ func (c *customErrors) ServeHTTP(rw http.ResponseWriter, req *http.Request) {
 	// check the recorder code against the configured http status code ranges
 	code := catcher.getCode()
 	for _, block := range c.httpCodeRanges {
-		if code >= block[0] && code <= block[1] {
-			logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
-			var query string
-			if len(c.backendQuery) > 0 {
-				query = "/" + strings.TrimPrefix(c.backendQuery, "/")
-				query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
-			}
+		if code < block[0] || code > block[1] {
+			continue
+		}
 
-			pageReq, err := newRequest(backendURL + query)
-			if err != nil {
-				logger.Error(err)
-				rw.WriteHeader(code)
-				_, err = fmt.Fprint(rw, http.StatusText(code))
-				if err != nil {
-					http.Error(rw, err.Error(), http.StatusInternalServerError)
-				}
-				return
-			}
+		logger.Debugf("Caught HTTP Status Code %d, returning error page", code)
 
-			recorderErrorPage := newResponseRecorder(ctx, rw)
-			utils.CopyHeaders(pageReq.Header, req.Header)
+		var query string
+		if len(c.backendQuery) > 0 {
+			query = "/" + strings.TrimPrefix(c.backendQuery, "/")
+			query = strings.ReplaceAll(query, "{status}", strconv.Itoa(code))
+		}
 
-			c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
+		pageReq, err := newRequest(backendURL + query)
+		if err != nil {
+			logger.Error(err)
+			rw.WriteHeader(code)
 
-			utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
-			rw.WriteHeader(code)
+			_, err = fmt.Fprint(rw, http.StatusText(code))
+			if err != nil {
+				http.Error(rw, err.Error(), http.StatusInternalServerError)
+			}
+			return
+		}
 
-			if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
-				logger.Error(err)
-			}
-			return
-		}
+		recorderErrorPage := newResponseRecorder(ctx, rw)
+		utils.CopyHeaders(pageReq.Header, req.Header)
+
+		c.backendHandler.ServeHTTP(recorderErrorPage, pageReq.WithContext(req.Context()))
+
+		utils.CopyHeaders(rw.Header(), recorderErrorPage.Header())
+		rw.WriteHeader(code)
+
+		if _, err = rw.Write(recorderErrorPage.GetBody().Bytes()); err != nil {
+			logger.Error(err)
+		}
+
+		return
 	}
 }
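The rewrite inverts the range check into a guard clause: non-matching codes skip ahead via continue, and the whole error-page body moves one indent level left instead of nesting inside the if. A minimal standalone sketch of the pattern (hypothetical values, not the Traefik middleware):

package main

import "fmt"

func main() {
	codes := []int{200, 404, 503}
	block := [2]int{400, 599}

	for _, code := range codes {
		// guard clause: bail out early instead of nesting the happy path
		if code < block[0] || code > block[1] {
			continue
		}
		fmt.Println("caught:", code) // runs for 404 and 503 only
	}
}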
@@ -27,12 +27,12 @@ func (n MockTracer) Extract(format, carrier interface{}) (opentracing.SpanContex
 	return nil, opentracing.ErrSpanContextNotFound
 }
 
-// MockSpanContext.
+// MockSpanContext a span context mock.
 type MockSpanContext struct{}
 
 func (n MockSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {}
 
-// MockSpan.
+// MockSpan a span mock.
 type MockSpan struct {
 	OpName string
 	Tags   map[string]interface{}
@@ -541,23 +541,24 @@ func (p *Provider) makeGatewayStatus(listenerStatuses []v1alpha1.ListenerStatus)
 
 	gatewayStatus.Listeners = listenerStatuses
 
-	// update "Scheduled" status with "ResourcesAvailable" reason
-	gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
-		Type:               string(v1alpha1.GatewayConditionScheduled),
-		Status:             metav1.ConditionTrue,
-		Reason:             "ResourcesAvailable",
-		Message:            "Resources available",
-		LastTransitionTime: metav1.Now(),
-	})
-
-	// update "Ready" status with "ListenersValid" reason
-	gatewayStatus.Conditions = append(gatewayStatus.Conditions, metav1.Condition{
-		Type:               string(v1alpha1.GatewayConditionReady),
-		Status:             metav1.ConditionTrue,
-		Reason:             "ListenersValid",
-		Message:            "Listeners valid",
-		LastTransitionTime: metav1.Now(),
-	})
+	gatewayStatus.Conditions = append(gatewayStatus.Conditions,
+		// update "Scheduled" status with "ResourcesAvailable" reason
+		metav1.Condition{
+			Type:               string(v1alpha1.GatewayConditionScheduled),
+			Status:             metav1.ConditionTrue,
+			Reason:             "ResourcesAvailable",
+			Message:            "Resources available",
+			LastTransitionTime: metav1.Now(),
+		},
+		// update "Ready" status with "ListenersValid" reason
+		metav1.Condition{
+			Type:               string(v1alpha1.GatewayConditionReady),
+			Status:             metav1.ConditionTrue,
+			Reason:             "ListenersValid",
+			Message:            "Listeners valid",
+			LastTransitionTime: metav1.Now(),
+		},
+	)
 
 	return gatewayStatus, nil
 }
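Since append is variadic, the two consecutive appends to gatewayStatus.Conditions collapse into a single call that passes both metav1.Condition values; behavior is unchanged. A tiny standalone illustration, using hypothetical strings in place of the Condition values:

package main

import "fmt"

func main() {
	conditions := []string{"Accepted"}

	// one variadic append replaces two consecutive appends
	conditions = append(conditions,
		"Scheduled", // was: conditions = append(conditions, "Scheduled")
		"Ready",     // was: conditions = append(conditions, "Ready")
	)
	fmt.Println(conditions) // [Accepted Scheduled Ready]
}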
@@ -341,7 +341,7 @@ func (c *clientWrapper) updateIngressStatusOld(src *networkingv1beta1.Ingress, i
 }
 
 // isLoadBalancerIngressEquals returns true if the given slices are equal, false otherwise.
-func isLoadBalancerIngressEquals(aSlice []corev1.LoadBalancerIngress, bSlice []corev1.LoadBalancerIngress) bool {
+func isLoadBalancerIngressEquals(aSlice, bSlice []corev1.LoadBalancerIngress) bool {
 	if len(aSlice) != len(bSlice) {
 		return false
 	}
@@ -341,11 +341,11 @@ func (ln tcpKeepAliveListener) Accept() (net.Conn, error) {
 		return nil, err
 	}
 
-	if err = tc.SetKeepAlive(true); err != nil {
+	if err := tc.SetKeepAlive(true); err != nil {
 		return nil, err
 	}
 
-	if err = tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
+	if err := tc.SetKeepAlivePeriod(3 * time.Minute); err != nil {
 		// Some systems, such as OpenBSD, have no user-settable per-socket TCP
 		// keepalive options.
 		if !errors.Is(err, syscall.ENOPROTOOPT) {
@@ -147,8 +147,8 @@ func (b blackHoleResponseWriter) Header() http.Header {
 	return http.Header{}
 }
 
-func (b blackHoleResponseWriter) Write(bytes []byte) (int, error) {
-	return len(bytes), nil
+func (b blackHoleResponseWriter) Write(data []byte) (int, error) {
+	return len(data), nil
 }
 
 func (b blackHoleResponseWriter) WriteHeader(statusCode int) {}
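Renaming the parameter from bytes to data stops it from shadowing the standard library's bytes package inside the method body. A minimal sketch of what such a shadow would block (hypothetical function, not the Traefik type):

package main

import (
	"bytes"
	"fmt"
)

// If this parameter were named `bytes`, the line declaring buf would not
// compile: `bytes.Buffer` would then refer to the []byte parameter,
// not to the package.
func write(data []byte) (int, error) {
	var buf bytes.Buffer
	buf.Write(data)
	return buf.Len(), nil
}

func main() {
	n, _ := write([]byte("hello"))
	fmt.Println(n) // 5
}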
@@ -24,6 +24,19 @@ type stickyCookie struct {
 	httpOnly bool
}
 
+// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
+// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
+// Each pick from the schedule has the earliest deadline entry selected.
+// Entries have deadlines set at currentDeadline + 1 / weight,
+// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
+type Balancer struct {
+	stickyCookie *stickyCookie
+
+	mutex       sync.RWMutex
+	handlers    []*namedHandler
+	curDeadline float64
+}
+
 // New creates a new load balancer.
 func New(sticky *dynamic.Sticky) *Balancer {
 	balancer := &Balancer{}
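The relocated doc comment describes the EDF pick rule: each entry's next deadline is currentDeadline + 1/weight, and the earliest deadline wins, which yields weighted round robin. A standalone sketch of that arithmetic (hypothetical, with a linear scan instead of the heap that gives the O(log n) pick mentioned in the comment):

package main

import "fmt"

type server struct {
	name     string
	weight   float64
	deadline float64
}

func main() {
	// weight 3 vs weight 1: expect roughly three picks of "a" per "b"
	servers := []*server{{name: "a", weight: 3}, {name: "b", weight: 1}}

	var current float64
	for i := 0; i < 8; i++ {
		// earliest deadline wins (a heap would make this O(log n))
		pick := servers[0]
		for _, s := range servers[1:] {
			if s.deadline < pick.deadline {
				pick = s
			}
		}
		current = pick.deadline
		pick.deadline = current + 1/pick.weight
		fmt.Print(pick.name, " ")
	}
	fmt.Println() // a b a a a b a a
}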
@@ -68,19 +81,6 @@ func (b *Balancer) Pop() interface{} {
 	return h
 }
 
-// Balancer is a WeightedRoundRobin load balancer based on Earliest Deadline First (EDF).
-// (https://en.wikipedia.org/wiki/Earliest_deadline_first_scheduling)
-// Each pick from the schedule has the earliest deadline entry selected.
-// Entries have deadlines set at currentDeadline + 1 / weight,
-// providing weighted round robin behavior with floating point weights and an O(log n) pick time.
-type Balancer struct {
-	stickyCookie *stickyCookie
-
-	mutex       sync.RWMutex
-	handlers    []*namedHandler
-	curDeadline float64
-}
-
 func (b *Balancer) nextServer() (*namedHandler, error) {
 	b.mutex.Lock()
 	defer b.mutex.Unlock()