Upgrade dependencies
parent d6d795e286
commit 83e09acc9f
177 changed files with 59841 additions and 39358 deletions
vendor/google.golang.org/grpc/grpclb.go (generated, vendored): 932 lines changed
@@ -19,21 +19,32 @@
 package grpc

 import (
     "errors"
     "fmt"
     "math/rand"
     "net"
+    "strconv"
+    "strings"
     "sync"
     "time"

     "golang.org/x/net/context"
-    "google.golang.org/grpc/codes"
-    lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1"
+    "google.golang.org/grpc/balancer"
+    "google.golang.org/grpc/connectivity"
+    lbpb "google.golang.org/grpc/grpclb/grpc_lb_v1/messages"
     "google.golang.org/grpc/grpclog"
-    "google.golang.org/grpc/metadata"
-    "google.golang.org/grpc/naming"
+    "google.golang.org/grpc/resolver"
 )

+const (
+    lbTokeyKey             = "lb-token"
+    defaultFallbackTimeout = 10 * time.Second
+    grpclbName             = "grpclb"
+)
+
+func convertDuration(d *lbpb.Duration) time.Duration {
+    if d == nil {
+        return 0
+    }
+    return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
+}
+
 // Client API for LoadBalancer service.
 // Mostly copied from generated pb.go file.
 // To avoid circular dependency.
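The hunk above adds convertDuration, which flattens the protobuf-style Duration message (separate Seconds and Nanos fields) into a single time.Duration. A minimal standalone sketch of the same arithmetic, with made-up inputs:

package main

import (
    "fmt"
    "time"
)

// convert mirrors the vendored helper: a protobuf-style duration arrives as
// separate seconds and nanos fields and collapses into one time.Duration.
func convert(seconds int64, nanos int32) time.Duration {
    return time.Duration(seconds)*time.Second + time.Duration(nanos)*time.Nanosecond
}

func main() {
    fmt.Println(convert(1, 500000000)) // 1.5s
    fmt.Println(convert(0, 0))         // 0s
}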
@@ -47,7 +58,7 @@ func (c *loadBalancerClient) BalanceLoad(ctx context.Context, opts ...CallOption
         ServerStreams: true,
         ClientStreams: true,
     }
-    stream, err := NewClientStream(ctx, desc, c.cc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
+    stream, err := c.cc.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad", opts...)
     if err != nil {
         return nil, err
     }
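This hunk's only change swaps the package-level NewClientStream helper for the NewStream method on ClientConn, which the gRPC version vendored here evidently exposes. A rough sketch of opening the same bidirectional stream from outside the package (the function name and the already-dialed conn are illustrative assumptions, not part of the diff):

import (
    "golang.org/x/net/context"

    "google.golang.org/grpc"
)

// openBalanceLoad sketches the method-based stream creation this hunk adopts.
func openBalanceLoad(ctx context.Context, conn *grpc.ClientConn) (grpc.ClientStream, error) {
    desc := &grpc.StreamDesc{
        StreamName:    "BalanceLoad",
        ServerStreams: true,
        ClientStreams: true,
    }
    return conn.NewStream(ctx, desc, "/grpc.lb.v1.LoadBalancer/BalanceLoad")
}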
@@ -71,687 +82,260 @@ func (x *balanceLoadClientStream) Recv() (*lbpb.LoadBalanceResponse, error) {
     return m, nil
 }

-// AddressType indicates the address type returned by name resolution.
-type AddressType uint8
-
-const (
-    // Backend indicates the server is a backend server.
-    Backend AddressType = iota
-    // GRPCLB indicates the server is a grpclb load balancer.
-    GRPCLB
-)
-
-// AddrMetadataGRPCLB contains the information the name resolver for grpclb should provide. The
-// name resolver used by the grpclb balancer is required to provide this type of metadata in
-// its address updates.
-type AddrMetadataGRPCLB struct {
-    // AddrType is the type of server (grpc load balancer or backend).
-    AddrType AddressType
-    // ServerName is the name of the grpc load balancer. Used for authentication.
-    ServerName string
-}
+func init() {
+    balancer.Register(newLBBuilder())
+}

-// NewGRPCLBBalancer creates a grpclb load balancer.
-func NewGRPCLBBalancer(r naming.Resolver) Balancer {
-    return &balancer{
-        r: r,
-    }
-}
+// newLBBuilder creates a builder for grpclb.
+func newLBBuilder() balancer.Builder {
+    return NewLBBuilderWithFallbackTimeout(defaultFallbackTimeout)
+}
+
+// NewLBBuilderWithFallbackTimeout creates a grpclb builder with the given
+// fallbackTimeout. If no response is received from the remote balancer within
+// fallbackTimeout, the backend addresses from the resolved address list will be
+// used.
+//
+// Only call this function when a non-default fallback timeout is needed.
+func NewLBBuilderWithFallbackTimeout(fallbackTimeout time.Duration) balancer.Builder {
+    return &lbBuilder{
+        fallbackTimeout: fallbackTimeout,
+    }
+}

-type remoteBalancerInfo struct {
-    addr string
-    // the server name used for authentication with the remote LB server.
-    name string
-}
+type lbBuilder struct {
+    fallbackTimeout time.Duration
+}

-// grpclbAddrInfo consists of the information of a backend server.
-type grpclbAddrInfo struct {
-    addr      Address
-    connected bool
-    // dropForRateLimiting indicates whether this particular request should be
-    // dropped by the client for rate limiting.
-    dropForRateLimiting bool
-    // dropForLoadBalancing indicates whether this particular request should be
-    // dropped by the client for load balancing.
-    dropForLoadBalancing bool
-}
+func (b *lbBuilder) Name() string {
+    return grpclbName
+}

-type balancer struct {
-    r        naming.Resolver
-    target   string
-    mu       sync.Mutex
-    seq      int // a sequence number to make sure addrCh does not get stale addresses.
-    w        naming.Watcher
-    addrCh   chan []Address
-    rbs      []remoteBalancerInfo
-    addrs    []*grpclbAddrInfo
-    next     int
-    waitCh   chan struct{}
-    done     bool
-    expTimer *time.Timer
-    rand     *rand.Rand
-
-    clientStats lbpb.ClientStats
-}
+func (b *lbBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
+    // This generates a manual resolver builder with a random scheme. This
+    // scheme will be used to dial to remote LB, so we can send filtered address
+    // updates to remote LB ClientConn using this manual resolver.
+    scheme := "grpclb_internal_" + strconv.FormatInt(time.Now().UnixNano(), 36)
+    r := &lbManualResolver{scheme: scheme, ccb: cc}
+
+    var target string
+    targetSplitted := strings.Split(cc.Target(), ":///")
+    if len(targetSplitted) < 2 {
+        target = cc.Target()
+    } else {
+        target = targetSplitted[1]
+    }
+
+    lb := &lbBalancer{
+        cc:              newLBCacheClientConn(cc),
+        target:          target,
+        opt:             opt,
+        fallbackTimeout: b.fallbackTimeout,
+        doneCh:          make(chan struct{}),
+
+        manualResolver: r,
+        csEvltr:        &connectivityStateEvaluator{},
+        subConns:       make(map[resolver.Address]balancer.SubConn),
+        scStates:       make(map[balancer.SubConn]connectivity.State),
+        picker:         &errPicker{err: balancer.ErrNoSubConnAvailable},
+        clientStats:    &rpcStats{},
+    }
+
+    return lb
+}
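The added code above is the new balancer.Builder wiring: grpclb registers itself by name in init(), and gRPC calls Build when that name is selected. A minimal sketch of the same pattern for a hypothetical custom balancer (the name and types here are illustrative, not part of the diff):

package examplelb

import "google.golang.org/grpc/balancer"

const exampleName = "example_lb" // hypothetical name, not from the diff

type exampleBuilder struct{}

func (exampleBuilder) Name() string { return exampleName }

func (exampleBuilder) Build(cc balancer.ClientConn, opt balancer.BuildOptions) balancer.Balancer {
    // A real builder returns a type satisfying balancer.Balancer, as
    // lbBuilder.Build returns an *lbBalancer above.
    return nil
}

func init() {
    // Same registration pattern as balancer.Register(newLBBuilder()) above.
    balancer.Register(exampleBuilder{})
}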
-func (b *balancer) watchAddrUpdates(w naming.Watcher, ch chan []remoteBalancerInfo) error {
-    updates, err := w.Next()
-    if err != nil {
-        grpclog.Warningf("grpclb: failed to get next addr update from watcher: %v", err)
-        return err
-    }
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    if b.done {
-        return ErrClientConnClosing
-    }
-    for _, update := range updates {
-        switch update.Op {
-        case naming.Add:
-            var exist bool
-            for _, v := range b.rbs {
-                // TODO: Is the same addr with different server name a different balancer?
-                if update.Addr == v.addr {
-                    exist = true
-                    break
-                }
-            }
-            if exist {
-                continue
-            }
-            md, ok := update.Metadata.(*AddrMetadataGRPCLB)
-            if !ok {
-                // TODO: Revisit the handling here and may introduce some fallback mechanism.
-                grpclog.Errorf("The name resolution contains unexpected metadata %v", update.Metadata)
-                continue
-            }
-            switch md.AddrType {
-            case Backend:
-                // TODO: Revisit the handling here and may introduce some fallback mechanism.
-                grpclog.Errorf("The name resolution does not give grpclb addresses")
-                continue
-            case GRPCLB:
-                b.rbs = append(b.rbs, remoteBalancerInfo{
-                    addr: update.Addr,
-                    name: md.ServerName,
-                })
-            default:
-                grpclog.Errorf("Received unknown address type %d", md.AddrType)
-                continue
-            }
-        case naming.Delete:
-            for i, v := range b.rbs {
-                if update.Addr == v.addr {
-                    copy(b.rbs[i:], b.rbs[i+1:])
-                    b.rbs = b.rbs[:len(b.rbs)-1]
-                    break
-                }
-            }
-        default:
-            grpclog.Errorf("Unknown update.Op %v", update.Op)
-        }
-    }
-    // TODO: Fall back to the basic round-robin load balancing if the resulting address is
-    // not a load balancer.
-    select {
-    case <-ch:
-    default:
-    }
-    ch <- b.rbs
-    return nil
-}
+type lbBalancer struct {
+    cc              *lbCacheClientConn
+    target          string
+    opt             balancer.BuildOptions
+    fallbackTimeout time.Duration
+    doneCh          chan struct{}
+
+    // manualResolver is used in the remote LB ClientConn inside grpclb. When
+    // resolved address updates are received by grpclb, filtered updates will be
+    // sent to remote LB ClientConn through this resolver.
+    manualResolver *lbManualResolver
+    // The ClientConn to talk to the remote balancer.
+    ccRemoteLB *ClientConn
+
+    // Support client side load reporting. Each picker gets a reference to this,
+    // and will update its content.
+    clientStats *rpcStats
+
+    mu sync.Mutex // guards everything following.
+    // The full server list including drops, used to check if the newly received
+    // serverList contains anything new. Each generated picker will also have
+    // reference to this list to do the first layer pick.
+    fullServerList []*lbpb.Server
+    // All backends addresses, with metadata set to nil. This list contains all
+    // backend addresses in the same order and with the same duplicates as in
+    // serverlist. When generating picker, a SubConn slice with the same order
+    // but with only READY SCs will be generated.
+    backendAddrs []resolver.Address
+    // Roundrobin functionalities.
+    csEvltr  *connectivityStateEvaluator
+    state    connectivity.State
+    subConns map[resolver.Address]balancer.SubConn   // Used to new/remove SubConn.
+    scStates map[balancer.SubConn]connectivity.State // Used to filter READY SubConns.
+    picker   balancer.Picker
+    // Support fallback to resolved backend addresses if there's no response
+    // from remote balancer within fallbackTimeout.
+    fallbackTimerExpired bool
+    serverListReceived   bool
+    // resolvedBackendAddrs is resolvedAddrs minus remote balancers. It's set
+    // when resolved address updates are received, and read in the goroutine
+    // handling fallback.
+    resolvedBackendAddrs []resolver.Address
+}
+
+// regeneratePicker takes a snapshot of the balancer, and generates a picker from
+// it. The picker
+//  - always returns ErrTransientFailure if the balancer is in TransientFailure,
+//  - does two layer roundrobin pick otherwise.
+// Caller must hold lb.mu.
+func (lb *lbBalancer) regeneratePicker() {
+    if lb.state == connectivity.TransientFailure {
+        lb.picker = &errPicker{err: balancer.ErrTransientFailure}
+        return
+    }
+    var readySCs []balancer.SubConn
+    for _, a := range lb.backendAddrs {
+        if sc, ok := lb.subConns[a]; ok {
+            if st, ok := lb.scStates[sc]; ok && st == connectivity.Ready {
+                readySCs = append(readySCs, sc)
+            }
+        }
+    }
+
+    if len(lb.fullServerList) <= 0 {
+        if len(readySCs) <= 0 {
+            lb.picker = &errPicker{err: balancer.ErrNoSubConnAvailable}
+            return
+        }
+        lb.picker = &rrPicker{subConns: readySCs}
+        return
+    }
+    lb.picker = &lbPicker{
+        serverList: lb.fullServerList,
+        subConns:   readySCs,
+        stats:      lb.clientStats,
+    }
+}
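rrPicker and lbPicker are defined elsewhere in this package; regeneratePicker only decides which one to install. As a rough illustration of the first-layer pick, a round-robin picker over a fixed slice of READY SubConns can be as small as the sketch below (assuming the balancer.Picker signature of the gRPC version vendored here):

import (
    "sync"

    "golang.org/x/net/context"

    "google.golang.org/grpc/balancer"
)

// rrSketch round-robins over a fixed set of READY SubConns, like the
// package's rrPicker. Sketch only; the real lbPicker layers drop logic on top.
type rrSketch struct {
    mu       sync.Mutex
    next     int
    subConns []balancer.SubConn
}

func (p *rrSketch) Pick(ctx context.Context, opts balancer.PickOptions) (balancer.SubConn, func(balancer.DoneInfo), error) {
    p.mu.Lock()
    defer p.mu.Unlock()
    if len(p.subConns) == 0 {
        return nil, nil, balancer.ErrNoSubConnAvailable
    }
    sc := p.subConns[p.next]
    p.next = (p.next + 1) % len(p.subConns)
    return sc, nil, nil
}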
+func (lb *lbBalancer) HandleSubConnStateChange(sc balancer.SubConn, s connectivity.State) {
+    grpclog.Infof("lbBalancer: handle SubConn state change: %p, %v", sc, s)
+    lb.mu.Lock()
+    defer lb.mu.Unlock()
+
+    oldS, ok := lb.scStates[sc]
+    if !ok {
+        grpclog.Infof("lbBalancer: got state changes for an unknown SubConn: %p, %v", sc, s)
+        return
+    }
+    lb.scStates[sc] = s
+    switch s {
+    case connectivity.Idle:
+        sc.Connect()
+    case connectivity.Shutdown:
+        // When an address was removed by resolver, b called RemoveSubConn but
+        // kept the sc's state in scStates. Remove state for this sc here.
+        delete(lb.scStates, sc)
+    }
+
+    oldAggrState := lb.state
+    lb.state = lb.csEvltr.recordTransition(oldS, s)
+
+    // Regenerate picker when one of the following happens:
+    //  - this sc became ready from not-ready
+    //  - this sc became not-ready from ready
+    //  - the aggregated state of balancer became TransientFailure from non-TransientFailure
+    //  - the aggregated state of balancer became non-TransientFailure from TransientFailure
+    if (oldS == connectivity.Ready) != (s == connectivity.Ready) ||
+        (lb.state == connectivity.TransientFailure) != (oldAggrState == connectivity.TransientFailure) {
+        lb.regeneratePicker()
+    }
+
+    lb.cc.UpdateBalancerState(lb.state, lb.picker)
+}
+
+// fallbackToBackendsAfter blocks for fallbackTimeout and falls back to use
+// resolved backends (backends received from resolver, not from remote balancer)
+// if no connection to remote balancers was successful.
+func (lb *lbBalancer) fallbackToBackendsAfter(fallbackTimeout time.Duration) {
+    timer := time.NewTimer(fallbackTimeout)
+    defer timer.Stop()
+    select {
+    case <-timer.C:
+    case <-lb.doneCh:
+        return
+    }
+    lb.mu.Lock()
+    if lb.serverListReceived {
+        lb.mu.Unlock()
+        return
+    }
+    lb.fallbackTimerExpired = true
+    lb.refreshSubConns(lb.resolvedBackendAddrs)
+    lb.mu.Unlock()
+}
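fallbackToBackendsAfter is a small reusable shape: block on a timer, but give up early if the balancer shuts down first. The same select in isolation, as a runnable sketch:

package main

import (
    "fmt"
    "time"
)

// waitOrDone returns true when d elapses, and false if done closes first:
// the same select shape fallbackToBackendsAfter uses with lb.doneCh.
func waitOrDone(d time.Duration, done <-chan struct{}) bool {
    timer := time.NewTimer(d)
    defer timer.Stop()
    select {
    case <-timer.C:
        return true
    case <-done:
        return false
    }
}

func main() {
    done := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond)
        close(done)
    }()
    fmt.Println(waitOrDone(time.Second, done)) // false: shut down before the timeout
}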
+// HandleResolvedAddrs sends the updated remoteLB addresses to remoteLB
+// clientConn. The remoteLB clientConn will handle creating/removing remoteLB
+// connections.
+func (lb *lbBalancer) HandleResolvedAddrs(addrs []resolver.Address, err error) {
+    grpclog.Infof("lbBalancer: handleResolvedResult: %+v", addrs)
+    if len(addrs) <= 0 {
+        return
+    }
+
+    var remoteBalancerAddrs, backendAddrs []resolver.Address
+    for _, a := range addrs {
+        if a.Type == resolver.GRPCLB {
+            remoteBalancerAddrs = append(remoteBalancerAddrs, a)
+        } else {
+            backendAddrs = append(backendAddrs, a)
+        }
+    }
+
+    if lb.ccRemoteLB == nil {
+        if len(remoteBalancerAddrs) <= 0 {
+            grpclog.Errorf("grpclb: no remote balancer address is available, should never happen")
+            return
+        }
+        // First time receiving resolved addresses, create a cc to remote
+        // balancers.
+        lb.dialRemoteLB(remoteBalancerAddrs[0].ServerName)
+        // Start the fallback goroutine.
+        go lb.fallbackToBackendsAfter(lb.fallbackTimeout)
+    }
+
+    // cc to remote balancers uses lb.manualResolver. Send the updated remote
+    // balancer addresses to it through manualResolver.
+    lb.manualResolver.NewAddress(remoteBalancerAddrs)
+
+    lb.mu.Lock()
+    lb.resolvedBackendAddrs = backendAddrs
+    // If serverListReceived is true, connection to remote balancer was
+    // successful and there's no need to do fallback anymore.
+    // If fallbackTimerExpired is false, fallback hasn't happened yet.
+    if !lb.serverListReceived && lb.fallbackTimerExpired {
+        // This means we received a new list of resolved backends, and we are
+        // still in fallback mode. Need to update the list of backends we are
+        // using to the new list of backends.
+        lb.refreshSubConns(lb.resolvedBackendAddrs)
+    }
+    lb.mu.Unlock()
+}
+
+func (lb *lbBalancer) Close() {
+    select {
+    case <-lb.doneCh:
+        return
+    default:
+    }
+    close(lb.doneCh)
+    if lb.ccRemoteLB != nil {
+        lb.ccRemoteLB.Close()
+    }
+    lb.cc.close()
+}
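The new Close above uses a select on doneCh to make shutdown idempotent: a second call observes the already-closed channel and returns instead of panicking on a double close. The shape in isolation (safe only when calls are serialized, as balancer callbacks are; sync.Once is the usual alternative for concurrent callers):

package main

import "fmt"

type worker struct {
    doneCh chan struct{}
}

// Close tolerates repeated calls: the select sees an already-closed doneCh
// and returns before reaching the second close.
func (w *worker) Close() {
    select {
    case <-w.doneCh:
        return
    default:
    }
    close(w.doneCh)
}

func main() {
    w := &worker{doneCh: make(chan struct{})}
    w.Close()
    w.Close() // no panic
    fmt.Println("closed")
}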
-func (b *balancer) serverListExpire(seq int) {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    // TODO: gRPC internals do not clear the connections when the server list is stale.
-    // This means RPCs will keep using the existing server list until b receives new
-    // server list even though the list is expired. Revisit this behavior later.
-    if b.done || seq < b.seq {
-        return
-    }
-    b.next = 0
-    b.addrs = nil
-    // Ask grpc internals to close all the corresponding connections.
-    b.addrCh <- nil
-}
-
-func convertDuration(d *lbpb.Duration) time.Duration {
-    if d == nil {
-        return 0
-    }
-    return time.Duration(d.Seconds)*time.Second + time.Duration(d.Nanos)*time.Nanosecond
-}
-
-func (b *balancer) processServerList(l *lbpb.ServerList, seq int) {
-    if l == nil {
-        return
-    }
-    servers := l.GetServers()
-    expiration := convertDuration(l.GetExpirationInterval())
-    var (
-        sl    []*grpclbAddrInfo
-        addrs []Address
-    )
-    for _, s := range servers {
-        md := metadata.Pairs("lb-token", s.LoadBalanceToken)
-        ip := net.IP(s.IpAddress)
-        ipStr := ip.String()
-        if ip.To4() == nil {
-            // Add square brackets to ipv6 addresses, otherwise net.Dial() and
-            // net.SplitHostPort() will return too many colons error.
-            ipStr = fmt.Sprintf("[%s]", ipStr)
-        }
-        addr := Address{
-            Addr:     fmt.Sprintf("%s:%d", ipStr, s.Port),
-            Metadata: &md,
-        }
-        sl = append(sl, &grpclbAddrInfo{
-            addr:                 addr,
-            dropForRateLimiting:  s.DropForRateLimiting,
-            dropForLoadBalancing: s.DropForLoadBalancing,
-        })
-        addrs = append(addrs, addr)
-    }
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    if b.done || seq < b.seq {
-        return
-    }
-    if len(sl) > 0 {
-        // reset b.next to 0 when replacing the server list.
-        b.next = 0
-        b.addrs = sl
-        b.addrCh <- addrs
-        if b.expTimer != nil {
-            b.expTimer.Stop()
-            b.expTimer = nil
-        }
-        if expiration > 0 {
-            b.expTimer = time.AfterFunc(expiration, func() {
-                b.serverListExpire(seq)
-            })
-        }
-    }
-    return
-}
-
-func (b *balancer) sendLoadReport(s *balanceLoadClientStream, interval time.Duration, done <-chan struct{}) {
-    ticker := time.NewTicker(interval)
-    defer ticker.Stop()
-    for {
-        select {
-        case <-ticker.C:
-        case <-done:
-            return
-        }
-        b.mu.Lock()
-        stats := b.clientStats
-        b.clientStats = lbpb.ClientStats{} // Clear the stats.
-        b.mu.Unlock()
-        t := time.Now()
-        stats.Timestamp = &lbpb.Timestamp{
-            Seconds: t.Unix(),
-            Nanos:   int32(t.Nanosecond()),
-        }
-        if err := s.Send(&lbpb.LoadBalanceRequest{
-            LoadBalanceRequestType: &lbpb.LoadBalanceRequest_ClientStats{
-                ClientStats: &stats,
-            },
-        }); err != nil {
-            grpclog.Errorf("grpclb: failed to send load report: %v", err)
-            return
-        }
-    }
-}
-
-func (b *balancer) callRemoteBalancer(lbc *loadBalancerClient, seq int) (retry bool) {
-    ctx, cancel := context.WithCancel(context.Background())
-    defer cancel()
-    stream, err := lbc.BalanceLoad(ctx)
-    if err != nil {
-        grpclog.Errorf("grpclb: failed to perform RPC to the remote balancer %v", err)
-        return
-    }
-    b.mu.Lock()
-    if b.done {
-        b.mu.Unlock()
-        return
-    }
-    b.mu.Unlock()
-    initReq := &lbpb.LoadBalanceRequest{
-        LoadBalanceRequestType: &lbpb.LoadBalanceRequest_InitialRequest{
-            InitialRequest: &lbpb.InitialLoadBalanceRequest{
-                Name: b.target,
-            },
-        },
-    }
-    if err := stream.Send(initReq); err != nil {
-        grpclog.Errorf("grpclb: failed to send init request: %v", err)
-        // TODO: backoff on retry?
-        return true
-    }
-    reply, err := stream.Recv()
-    if err != nil {
-        grpclog.Errorf("grpclb: failed to recv init response: %v", err)
-        // TODO: backoff on retry?
-        return true
-    }
-    initResp := reply.GetInitialResponse()
-    if initResp == nil {
-        grpclog.Errorf("grpclb: reply from remote balancer did not include initial response.")
-        return
-    }
-    // TODO: Support delegation.
-    if initResp.LoadBalancerDelegate != "" {
-        // delegation
-        grpclog.Errorf("TODO: Delegation is not supported yet.")
-        return
-    }
-    streamDone := make(chan struct{})
-    defer close(streamDone)
-    b.mu.Lock()
-    b.clientStats = lbpb.ClientStats{} // Clear client stats.
-    b.mu.Unlock()
-    if d := convertDuration(initResp.ClientStatsReportInterval); d > 0 {
-        go b.sendLoadReport(stream, d, streamDone)
-    }
-    // Retrieve the server list.
-    for {
-        reply, err := stream.Recv()
-        if err != nil {
-            grpclog.Errorf("grpclb: failed to recv server list: %v", err)
-            break
-        }
-        b.mu.Lock()
-        if b.done || seq < b.seq {
-            b.mu.Unlock()
-            return
-        }
-        b.seq++ // tick when receiving a new list of servers.
-        seq = b.seq
-        b.mu.Unlock()
-        if serverList := reply.GetServerList(); serverList != nil {
-            b.processServerList(serverList, seq)
-        }
-    }
-    return true
-}
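The removed processServerList bracketed IPv6 literals by hand before gluing on the port. For reference, net.JoinHostPort from the standard library encapsulates exactly that rule:

package main

import (
    "fmt"
    "net"
)

func main() {
    // JoinHostPort adds the square brackets an IPv6 literal needs so that
    // net.Dial and net.SplitHostPort don't fail with "too many colons".
    fmt.Println(net.JoinHostPort("10.0.0.1", "443"))    // 10.0.0.1:443
    fmt.Println(net.JoinHostPort("2001:db8::1", "443")) // [2001:db8::1]:443
}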
-func (b *balancer) Start(target string, config BalancerConfig) error {
-    b.rand = rand.New(rand.NewSource(time.Now().Unix()))
-    // TODO: Fall back to the basic direct connection if there is no name resolver.
-    if b.r == nil {
-        return errors.New("there is no name resolver installed")
-    }
-    b.target = target
-    b.mu.Lock()
-    if b.done {
-        b.mu.Unlock()
-        return ErrClientConnClosing
-    }
-    b.addrCh = make(chan []Address)
-    w, err := b.r.Resolve(target)
-    if err != nil {
-        b.mu.Unlock()
-        grpclog.Errorf("grpclb: failed to resolve address: %v, err: %v", target, err)
-        return err
-    }
-    b.w = w
-    b.mu.Unlock()
-    balancerAddrsCh := make(chan []remoteBalancerInfo, 1)
-    // Spawn a goroutine to monitor the name resolution of remote load balancer.
-    go func() {
-        for {
-            if err := b.watchAddrUpdates(w, balancerAddrsCh); err != nil {
-                grpclog.Warningf("grpclb: the naming watcher stops working due to %v.\n", err)
-                close(balancerAddrsCh)
-                return
-            }
-        }
-    }()
-    // Spawn a goroutine to talk to the remote load balancer.
-    go func() {
-        var (
-            cc *ClientConn
-            // ccError is closed when there is an error in the current cc.
-            // A new rb should be picked from rbs and connected.
-            ccError chan struct{}
-            rb      *remoteBalancerInfo
-            rbs     []remoteBalancerInfo
-            rbIdx   int
-        )
-
-        defer func() {
-            if ccError != nil {
-                select {
-                case <-ccError:
-                default:
-                    close(ccError)
-                }
-            }
-            if cc != nil {
-                cc.Close()
-            }
-        }()
-
-        for {
-            var ok bool
-            select {
-            case rbs, ok = <-balancerAddrsCh:
-                if !ok {
-                    return
-                }
-                foundIdx := -1
-                if rb != nil {
-                    for i, trb := range rbs {
-                        if trb == *rb {
-                            foundIdx = i
-                            break
-                        }
-                    }
-                }
-                if foundIdx >= 0 {
-                    if foundIdx >= 1 {
-                        // Move the address in use to the beginning of the list.
-                        b.rbs[0], b.rbs[foundIdx] = b.rbs[foundIdx], b.rbs[0]
-                        rbIdx = 0
-                    }
-                    continue // If found, don't dial new cc.
-                } else if len(rbs) > 0 {
-                    // Pick a random one from the list, instead of always using the first one.
-                    if l := len(rbs); l > 1 && rb != nil {
-                        tmpIdx := b.rand.Intn(l - 1)
-                        b.rbs[0], b.rbs[tmpIdx] = b.rbs[tmpIdx], b.rbs[0]
-                    }
-                    rbIdx = 0
-                    rb = &rbs[0]
-                } else {
-                    // foundIdx < 0 && len(rbs) <= 0.
-                    rb = nil
-                }
-            case <-ccError:
-                ccError = nil
-                if rbIdx < len(rbs)-1 {
-                    rbIdx++
-                    rb = &rbs[rbIdx]
-                } else {
-                    rb = nil
-                }
-            }
-
-            if rb == nil {
-                continue
-            }
-
-            if cc != nil {
-                cc.Close()
-            }
-            // Talk to the remote load balancer to get the server list.
-            var (
-                err   error
-                dopts []DialOption
-            )
-            if creds := config.DialCreds; creds != nil {
-                if rb.name != "" {
-                    if err := creds.OverrideServerName(rb.name); err != nil {
-                        grpclog.Warningf("grpclb: failed to override the server name in the credentials: %v", err)
-                        continue
-                    }
-                }
-                dopts = append(dopts, WithTransportCredentials(creds))
-            } else {
-                dopts = append(dopts, WithInsecure())
-            }
-            if dialer := config.Dialer; dialer != nil {
-                // WithDialer takes a different type of function, so we instead use a special DialOption here.
-                dopts = append(dopts, func(o *dialOptions) { o.copts.Dialer = dialer })
-            }
-            ccError = make(chan struct{})
-            cc, err = Dial(rb.addr, dopts...)
-            if err != nil {
-                grpclog.Warningf("grpclb: failed to setup a connection to the remote balancer %v: %v", rb.addr, err)
-                close(ccError)
-                continue
-            }
-            b.mu.Lock()
-            b.seq++ // tick when getting a new balancer address
-            seq := b.seq
-            b.next = 0
-            b.mu.Unlock()
-            go func(cc *ClientConn, ccError chan struct{}) {
-                lbc := &loadBalancerClient{cc}
-                b.callRemoteBalancer(lbc, seq)
-                cc.Close()
-                select {
-                case <-ccError:
-                default:
-                    close(ccError)
-                }
-            }(cc, ccError)
-        }
-    }()
-    return nil
-}
-func (b *balancer) down(addr Address, err error) {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    for _, a := range b.addrs {
-        if addr == a.addr {
-            a.connected = false
-            break
-        }
-    }
-}
-
-func (b *balancer) Up(addr Address) func(error) {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    if b.done {
-        return nil
-    }
-    var cnt int
-    for _, a := range b.addrs {
-        if a.addr == addr {
-            if a.connected {
-                return nil
-            }
-            a.connected = true
-        }
-        if a.connected && !a.dropForRateLimiting && !a.dropForLoadBalancing {
-            cnt++
-        }
-    }
-    // addr is the only one which is connected. Notify the Get() callers who are blocking.
-    if cnt == 1 && b.waitCh != nil {
-        close(b.waitCh)
-        b.waitCh = nil
-    }
-    return func(err error) {
-        b.down(addr, err)
-    }
-}
-
-func (b *balancer) Get(ctx context.Context, opts BalancerGetOptions) (addr Address, put func(), err error) {
-    var ch chan struct{}
-    b.mu.Lock()
-    if b.done {
-        b.mu.Unlock()
-        err = ErrClientConnClosing
-        return
-    }
-    seq := b.seq
-
-    defer func() {
-        if err != nil {
-            return
-        }
-        put = func() {
-            s, ok := rpcInfoFromContext(ctx)
-            if !ok {
-                return
-            }
-            b.mu.Lock()
-            defer b.mu.Unlock()
-            if b.done || seq < b.seq {
-                return
-            }
-            b.clientStats.NumCallsFinished++
-            if !s.bytesSent {
-                b.clientStats.NumCallsFinishedWithClientFailedToSend++
-            } else if s.bytesReceived {
-                b.clientStats.NumCallsFinishedKnownReceived++
-            }
-        }
-    }()
-
-    b.clientStats.NumCallsStarted++
-    if len(b.addrs) > 0 {
-        if b.next >= len(b.addrs) {
-            b.next = 0
-        }
-        next := b.next
-        for {
-            a := b.addrs[next]
-            next = (next + 1) % len(b.addrs)
-            if a.connected {
-                if !a.dropForRateLimiting && !a.dropForLoadBalancing {
-                    addr = a.addr
-                    b.next = next
-                    b.mu.Unlock()
-                    return
-                }
-                if !opts.BlockingWait {
-                    b.next = next
-                    if a.dropForLoadBalancing {
-                        b.clientStats.NumCallsFinished++
-                        b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
-                    } else if a.dropForRateLimiting {
-                        b.clientStats.NumCallsFinished++
-                        b.clientStats.NumCallsFinishedWithDropForRateLimiting++
-                    }
-                    b.mu.Unlock()
-                    err = Errorf(codes.Unavailable, "%s drops requests", a.addr.Addr)
-                    return
-                }
-            }
-            if next == b.next {
-                // Has iterated all the possible addresses but none is connected.
-                break
-            }
-        }
-    }
-    if !opts.BlockingWait {
-        if len(b.addrs) == 0 {
-            b.clientStats.NumCallsFinished++
-            b.clientStats.NumCallsFinishedWithClientFailedToSend++
-            b.mu.Unlock()
-            err = Errorf(codes.Unavailable, "there is no address available")
-            return
-        }
-        // Returns the next addr on b.addrs for a failfast RPC.
-        addr = b.addrs[b.next].addr
-        b.next++
-        b.mu.Unlock()
-        return
-    }
-    // Wait on b.waitCh for non-failfast RPCs.
-    if b.waitCh == nil {
-        ch = make(chan struct{})
-        b.waitCh = ch
-    } else {
-        ch = b.waitCh
-    }
-    b.mu.Unlock()
-    for {
-        select {
-        case <-ctx.Done():
-            b.mu.Lock()
-            b.clientStats.NumCallsFinished++
-            b.clientStats.NumCallsFinishedWithClientFailedToSend++
-            b.mu.Unlock()
-            err = ctx.Err()
-            return
-        case <-ch:
-            b.mu.Lock()
-            if b.done {
-                b.clientStats.NumCallsFinished++
-                b.clientStats.NumCallsFinishedWithClientFailedToSend++
-                b.mu.Unlock()
-                err = ErrClientConnClosing
-                return
-            }
-
-            if len(b.addrs) > 0 {
-                if b.next >= len(b.addrs) {
-                    b.next = 0
-                }
-                next := b.next
-                for {
-                    a := b.addrs[next]
-                    next = (next + 1) % len(b.addrs)
-                    if a.connected {
-                        if !a.dropForRateLimiting && !a.dropForLoadBalancing {
-                            addr = a.addr
-                            b.next = next
-                            b.mu.Unlock()
-                            return
-                        }
-                        if !opts.BlockingWait {
-                            b.next = next
-                            if a.dropForLoadBalancing {
-                                b.clientStats.NumCallsFinished++
-                                b.clientStats.NumCallsFinishedWithDropForLoadBalancing++
-                            } else if a.dropForRateLimiting {
-                                b.clientStats.NumCallsFinished++
-                                b.clientStats.NumCallsFinishedWithDropForRateLimiting++
-                            }
-                            b.mu.Unlock()
-                            err = Errorf(codes.Unavailable, "drop requests for the address %s", a.addr.Addr)
-                            return
-                        }
-                    }
-                    if next == b.next {
-                        // Has iterated all the possible addresses but none is connected.
-                        break
-                    }
-                }
-            }
-            // The newly added addr got removed by Down() again.
-            if b.waitCh == nil {
-                ch = make(chan struct{})
-                b.waitCh = ch
-            } else {
-                ch = b.waitCh
-            }
-            b.mu.Unlock()
-        }
-    }
-}
-
-func (b *balancer) Notify() <-chan []Address {
-    return b.addrCh
-}
-
-func (b *balancer) Close() error {
-    b.mu.Lock()
-    defer b.mu.Unlock()
-    if b.done {
-        return errBalancerClosed
-    }
-    b.done = true
-    if b.expTimer != nil {
-        b.expTimer.Stop()
-    }
-    if b.waitCh != nil {
-        close(b.waitCh)
-    }
-    if b.addrCh != nil {
-        close(b.addrCh)
-    }
-    if b.w != nil {
-        b.w.Close()
-    }
-    return nil
-}
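Taken together, the diff retires the resolver-driven Balancer API on its removed side: before this upgrade a caller wired grpclb in explicitly, roughly as in the sketch below, which compiles only against the old vendored version (r stands for some naming.Resolver implementation and is an assumption of the sketch):

import (
    "google.golang.org/grpc"
    "google.golang.org/grpc/naming"
)

// dialOldStyle shows the API this diff removes; it is a sketch of the
// pre-upgrade usage, not code from the commit.
func dialOldStyle(target string, r naming.Resolver) (*grpc.ClientConn, error) {
    return grpc.Dial(target,
        grpc.WithBalancer(grpc.NewGRPCLBBalancer(r)), // removed by this diff
        grpc.WithInsecure(),
    )
}

After the upgrade no Dial option is needed: the builder self-registers under the name "grpclb" in init(), and HandleResolvedAddrs above takes over once the resolver reports addresses of type resolver.GRPCLB.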