Switch to golang/dep.

commit 044d87d96d (parent d88554fa92)
239 changed files with 42372 additions and 7011 deletions

vendor/github.com/coreos/etcd/etcdserver/api/capability.go (generated, vendored, new file)
@@ -0,0 +1,86 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
	"sync"

	"github.com/coreos/etcd/version"
	"github.com/coreos/go-semver/semver"
	"github.com/coreos/pkg/capnslog"
)

type Capability string

const (
	AuthCapability  Capability = "auth"
	V3rpcCapability Capability = "v3rpc"
)

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api")

	// capabilityMaps is a static map of version to capability map.
	capabilityMaps = map[string]map[Capability]bool{
		"3.0.0": {AuthCapability: true, V3rpcCapability: true},
		"3.1.0": {AuthCapability: true, V3rpcCapability: true},
		"3.2.0": {AuthCapability: true, V3rpcCapability: true},
	}

	enableMapMu sync.RWMutex
	// enabledMap points to a map in capabilityMaps
	enabledMap map[Capability]bool

	curVersion *semver.Version
)

func init() {
	enabledMap = map[Capability]bool{
		AuthCapability:  true,
		V3rpcCapability: true,
	}
}

// UpdateCapability updates the enabledMap when the cluster version increases.
func UpdateCapability(v *semver.Version) {
	if v == nil {
		// if recovered but version was never set by cluster
		return
	}
	enableMapMu.Lock()
	if curVersion != nil && !curVersion.LessThan(*v) {
		enableMapMu.Unlock()
		return
	}
	curVersion = v
	enabledMap = capabilityMaps[curVersion.String()]
	enableMapMu.Unlock()
	plog.Infof("enabled capabilities for version %s", version.Cluster(v.String()))
}

func IsCapabilityEnabled(c Capability) bool {
	enableMapMu.RLock()
	defer enableMapMu.RUnlock()
	if enabledMap == nil {
		return false
	}
	return enabledMap[c]
}

func EnableCapability(c Capability) {
	enableMapMu.Lock()
	defer enableMapMu.Unlock()
	enabledMap[c] = true
}
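
The capability gate above is a small read-mostly pattern: a package-level map guarded by a sync.RWMutex, swapped wholesale when the cluster version rises. A minimal, self-contained sketch of that pattern follows, using only the standard library; it mirrors the vendored names but is illustrative, not part of this commit.

package main

import (
	"fmt"
	"sync"
)

type Capability string

const (
	AuthCapability  Capability = "auth"
	V3rpcCapability Capability = "v3rpc"
)

var (
	// mu guards enabled; reads take the cheap RLock, the rare
	// version-bump swap takes the exclusive Lock.
	mu      sync.RWMutex
	enabled = map[Capability]bool{AuthCapability: true, V3rpcCapability: true}
)

func IsCapabilityEnabled(c Capability) bool {
	mu.RLock()
	defer mu.RUnlock()
	return enabled[c]
}

func main() {
	fmt.Println(IsCapabilityEnabled(AuthCapability)) // true
}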

vendor/github.com/coreos/etcd/etcdserver/api/cluster.go (generated, vendored, new file)
@@ -0,0 +1,41 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package api

import (
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/types"

	"github.com/coreos/go-semver/semver"
)

// Cluster is an interface representing a collection of members in one etcd cluster.
type Cluster interface {
	// ID returns the cluster ID
	ID() types.ID
	// ClientURLs returns an aggregate set of all URLs on which this
	// cluster is listening for client requests
	ClientURLs() []string
	// Members returns a slice of members sorted by their ID
	Members() []*membership.Member
	// Member retrieves a particular member based on ID, or nil if the
	// member does not exist in the cluster
	Member(id types.ID) *membership.Member
	// IsIDRemoved checks whether the given ID has been removed from this
	// cluster at some point in the past
	IsIDRemoved(id types.ID) bool
	// Version is the cluster-wide minimum major.minor version.
	Version() *semver.Version
}

vendor/github.com/coreos/etcd/etcdserver/api/doc.go (generated, vendored, new file)
@@ -0,0 +1,16 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package api manages the capabilities and features that are exposed to clients by the etcd cluster.
package api

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/auth.go (generated, vendored, new file)
@@ -0,0 +1,157 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"github.com/coreos/etcd/etcdserver"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
)

type AuthServer struct {
	authenticator etcdserver.Authenticator
}

func NewAuthServer(s *etcdserver.EtcdServer) *AuthServer {
	return &AuthServer{authenticator: s}
}

func (as *AuthServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	resp, err := as.authenticator.AuthEnable(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
	resp, err := as.authenticator.AuthDisable(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
	resp, err := as.authenticator.Authenticate(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
	resp, err := as.authenticator.RoleAdd(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
	resp, err := as.authenticator.RoleDelete(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
	resp, err := as.authenticator.RoleGet(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
	resp, err := as.authenticator.RoleList(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
	resp, err := as.authenticator.RoleRevokePermission(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
	resp, err := as.authenticator.RoleGrantPermission(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
	resp, err := as.authenticator.UserAdd(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
	resp, err := as.authenticator.UserDelete(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
	resp, err := as.authenticator.UserGet(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
	resp, err := as.authenticator.UserList(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
	resp, err := as.authenticator.UserGrantRole(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
	resp, err := as.authenticator.UserRevokeRole(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

func (as *AuthServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
	resp, err := as.authenticator.UserChangePassword(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}
	return resp, nil
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/codec.go (generated, vendored, new file)
@@ -0,0 +1,34 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import "github.com/gogo/protobuf/proto"

type codec struct{}

func (c *codec) Marshal(v interface{}) ([]byte, error) {
	b, err := proto.Marshal(v.(proto.Message))
	sentBytes.Add(float64(len(b)))
	return b, err
}

func (c *codec) Unmarshal(data []byte, v interface{}) error {
	receivedBytes.Add(float64(len(data)))
	return proto.Unmarshal(data, v.(proto.Message))
}

func (c *codec) String() string {
	return "proto"
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/grpc.go (generated, vendored, new file)
@@ -0,0 +1,53 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"crypto/tls"
	"math"

	"github.com/coreos/etcd/etcdserver"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials"
	"google.golang.org/grpc/grpclog"
)

const maxStreams = math.MaxUint32

func init() {
	grpclog.SetLogger(plog)
}

func Server(s *etcdserver.EtcdServer, tls *tls.Config) *grpc.Server {
	var opts []grpc.ServerOption
	opts = append(opts, grpc.CustomCodec(&codec{}))
	if tls != nil {
		opts = append(opts, grpc.Creds(credentials.NewTLS(tls)))
	}
	opts = append(opts, grpc.UnaryInterceptor(newUnaryInterceptor(s)))
	opts = append(opts, grpc.StreamInterceptor(newStreamInterceptor(s)))
	opts = append(opts, grpc.MaxConcurrentStreams(maxStreams))
	grpcServer := grpc.NewServer(opts...)

	pb.RegisterKVServer(grpcServer, NewQuotaKVServer(s))
	pb.RegisterWatchServer(grpcServer, NewWatchServer(s))
	pb.RegisterLeaseServer(grpcServer, NewQuotaLeaseServer(s))
	pb.RegisterClusterServer(grpcServer, NewClusterServer(s))
	pb.RegisterAuthServer(grpcServer, NewAuthServer(s))
	pb.RegisterMaintenanceServer(grpcServer, NewMaintenanceServer(s))

	return grpcServer
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/header.go (generated, vendored, new file)
@@ -0,0 +1,46 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"github.com/coreos/etcd/etcdserver"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

type header struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer
	rev       func() int64
}

func newHeader(s *etcdserver.EtcdServer) header {
	return header{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		rev:       func() int64 { return s.KV().Rev() },
	}
}

// fill populates pb.ResponseHeader using etcdserver information
func (h *header) fill(rh *pb.ResponseHeader) {
	rh.ClusterId = uint64(h.clusterID)
	rh.MemberId = uint64(h.memberID)
	rh.RaftTerm = h.raftTimer.Term()
	if rh.Revision == 0 {
		rh.Revision = h.rev()
	}
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/interceptor.go (generated, vendored, new file)
@@ -0,0 +1,144 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"sync"
	"time"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/raft"

	prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"golang.org/x/net/context"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

const (
	maxNoLeaderCnt = 3
)

type streamsMap struct {
	mu      sync.Mutex
	streams map[grpc.ServerStream]struct{}
}

func newUnaryInterceptor(s *etcdserver.EtcdServer) grpc.UnaryServerInterceptor {
	return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
			return nil, rpctypes.ErrGRPCNotCapable
		}

		md, ok := metadata.FromContext(ctx)
		if ok {
			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
				if s.Leader() == types.ID(raft.None) {
					return nil, rpctypes.ErrGRPCNoLeader
				}
			}
		}

		return prometheus.UnaryServerInterceptor(ctx, req, info, handler)
	}
}

func newStreamInterceptor(s *etcdserver.EtcdServer) grpc.StreamServerInterceptor {
	smap := monitorLeader(s)

	return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
		if !api.IsCapabilityEnabled(api.V3rpcCapability) {
			return rpctypes.ErrGRPCNotCapable
		}

		md, ok := metadata.FromContext(ss.Context())
		if ok {
			if ks := md[rpctypes.MetadataRequireLeaderKey]; len(ks) > 0 && ks[0] == rpctypes.MetadataHasLeader {
				if s.Leader() == types.ID(raft.None) {
					return rpctypes.ErrGRPCNoLeader
				}

				cctx, cancel := context.WithCancel(ss.Context())
				ss = serverStreamWithCtx{ctx: cctx, cancel: &cancel, ServerStream: ss}

				smap.mu.Lock()
				smap.streams[ss] = struct{}{}
				smap.mu.Unlock()

				defer func() {
					smap.mu.Lock()
					delete(smap.streams, ss)
					smap.mu.Unlock()
					cancel()
				}()
			}
		}

		return prometheus.StreamServerInterceptor(srv, ss, info, handler)
	}
}

type serverStreamWithCtx struct {
	grpc.ServerStream
	ctx    context.Context
	cancel *context.CancelFunc
}

func (ssc serverStreamWithCtx) Context() context.Context { return ssc.ctx }

func monitorLeader(s *etcdserver.EtcdServer) *streamsMap {
	smap := &streamsMap{
		streams: make(map[grpc.ServerStream]struct{}),
	}

	go func() {
		election := time.Duration(s.Cfg.TickMs) * time.Duration(s.Cfg.ElectionTicks) * time.Millisecond
		noLeaderCnt := 0

		for {
			select {
			case <-s.StopNotify():
				return
			case <-time.After(election):
				if s.Leader() == types.ID(raft.None) {
					noLeaderCnt++
				} else {
					noLeaderCnt = 0
				}

				// We are more conservative on canceling existing streams. Reconnecting streams
				// cost much more than just rejecting new requests. So we wait until the member
				// cannot find a leader for maxNoLeaderCnt election timeouts to cancel existing streams.
				if noLeaderCnt >= maxNoLeaderCnt {
					smap.mu.Lock()
					for ss := range smap.streams {
						if ssWithCtx, ok := ss.(serverStreamWithCtx); ok {
							(*ssWithCtx.cancel)()
							<-ss.Context().Done()
						}
					}
					smap.streams = make(map[grpc.ServerStream]struct{})
					smap.mu.Unlock()
				}
			}
		}
	}()

	return smap
}
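
Both interceptors honor a per-request metadata flag that lets a client insist on a raft leader being present. A minimal client-side sketch of setting that flag, not part of this commit: it assumes rpctypes.MetadataRequireLeaderKey is the literal "hasleader" and rpctypes.MetadataHasLeader is "true", and it uses the older metadata.NewContext API that matches the metadata.FromContext calls above.

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

// withRequireLeader marks an outgoing context so the server-side
// interceptors above fail fast with ErrGRPCNoLeader while the
// contacted member has no leader.
func withRequireLeader(ctx context.Context) context.Context {
	md := metadata.Pairs("hasleader", "true") // assumed values of the rpctypes constants
	return metadata.NewContext(ctx, md)
}

func main() {
	ctx := withRequireLeader(context.Background())
	md, _ := metadata.FromContext(ctx)
	fmt.Println(md["hasleader"]) // [true]
}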

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/key.go (generated, vendored, new file)
@@ -0,0 +1,259 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package v3rpc implements etcd v3 RPC system based on gRPC.
package v3rpc

import (
	"sort"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/pkg/capnslog"
	"golang.org/x/net/context"
)

var (
	plog = capnslog.NewPackageLogger("github.com/coreos/etcd", "etcdserver/api/v3rpc")

	// Max operations per txn list. For example, Txn.Success can have at most 128 operations,
	// and Txn.Failure can have at most 128 operations.
	MaxOpsPerTxn = 128
)

type kvServer struct {
	hdr header
	kv  etcdserver.RaftKV
}

func NewKVServer(s *etcdserver.EtcdServer) pb.KVServer {
	return &kvServer{hdr: newHeader(s), kv: s}
}

func (s *kvServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	if err := checkRangeRequest(r); err != nil {
		return nil, err
	}

	resp, err := s.kv.Range(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}

	if resp.Header == nil {
		plog.Panic("unexpected nil resp.Header")
	}
	s.hdr.fill(resp.Header)
	return resp, nil
}

func (s *kvServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	if err := checkPutRequest(r); err != nil {
		return nil, err
	}

	resp, err := s.kv.Put(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}

	if resp.Header == nil {
		plog.Panic("unexpected nil resp.Header")
	}
	s.hdr.fill(resp.Header)
	return resp, nil
}

func (s *kvServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	if err := checkDeleteRequest(r); err != nil {
		return nil, err
	}

	resp, err := s.kv.DeleteRange(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}

	if resp.Header == nil {
		plog.Panic("unexpected nil resp.Header")
	}
	s.hdr.fill(resp.Header)
	return resp, nil
}

func (s *kvServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if err := checkTxnRequest(r); err != nil {
		return nil, err
	}

	resp, err := s.kv.Txn(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}

	if resp.Header == nil {
		plog.Panic("unexpected nil resp.Header")
	}
	s.hdr.fill(resp.Header)
	return resp, nil
}

func (s *kvServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	resp, err := s.kv.Compact(ctx, r)
	if err != nil {
		return nil, togRPCError(err)
	}

	if resp.Header == nil {
		plog.Panic("unexpected nil resp.Header")
	}
	s.hdr.fill(resp.Header)
	return resp, nil
}

func checkRangeRequest(r *pb.RangeRequest) error {
	if len(r.Key) == 0 {
		return rpctypes.ErrGRPCEmptyKey
	}
	return nil
}

func checkPutRequest(r *pb.PutRequest) error {
	if len(r.Key) == 0 {
		return rpctypes.ErrGRPCEmptyKey
	}
	if r.IgnoreValue && len(r.Value) != 0 {
		return rpctypes.ErrGRPCValueProvided
	}
	if r.IgnoreLease && r.Lease != 0 {
		return rpctypes.ErrGRPCLeaseProvided
	}
	return nil
}

func checkDeleteRequest(r *pb.DeleteRangeRequest) error {
	if len(r.Key) == 0 {
		return rpctypes.ErrGRPCEmptyKey
	}
	return nil
}

func checkTxnRequest(r *pb.TxnRequest) error {
	if len(r.Compare) > MaxOpsPerTxn || len(r.Success) > MaxOpsPerTxn || len(r.Failure) > MaxOpsPerTxn {
		return rpctypes.ErrGRPCTooManyOps
	}

	for _, c := range r.Compare {
		if len(c.Key) == 0 {
			return rpctypes.ErrGRPCEmptyKey
		}
	}

	for _, u := range r.Success {
		if err := checkRequestOp(u); err != nil {
			return err
		}
	}
	if err := checkRequestDupKeys(r.Success); err != nil {
		return err
	}

	for _, u := range r.Failure {
		if err := checkRequestOp(u); err != nil {
			return err
		}
	}
	return checkRequestDupKeys(r.Failure)
}

// checkRequestDupKeys gives rpctypes.ErrGRPCDuplicateKey if the same key is modified twice
func checkRequestDupKeys(reqs []*pb.RequestOp) error {
	// check put overlap
	keys := make(map[string]struct{})
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
		if !ok {
			continue
		}
		preq := tv.RequestPut
		if preq == nil {
			continue
		}
		if _, ok := keys[string(preq.Key)]; ok {
			return rpctypes.ErrGRPCDuplicateKey
		}
		keys[string(preq.Key)] = struct{}{}
	}

	// no need to check deletes if no puts; delete overlaps are permitted
	if len(keys) == 0 {
		return nil
	}

	// sort keys for range checking
	sortedKeys := []string{}
	for k := range keys {
		sortedKeys = append(sortedKeys, k)
	}
	sort.Strings(sortedKeys)

	// check put overlap with deletes
	for _, requ := range reqs {
		tv, ok := requ.Request.(*pb.RequestOp_RequestDeleteRange)
		if !ok {
			continue
		}
		dreq := tv.RequestDeleteRange
		if dreq == nil {
			continue
		}
		if dreq.RangeEnd == nil {
			if _, found := keys[string(dreq.Key)]; found {
				return rpctypes.ErrGRPCDuplicateKey
			}
		} else {
			lo := sort.SearchStrings(sortedKeys, string(dreq.Key))
			hi := sort.SearchStrings(sortedKeys, string(dreq.RangeEnd))
			if lo != hi {
				// element between lo and hi => overlap
				return rpctypes.ErrGRPCDuplicateKey
			}
		}
	}

	return nil
}

func checkRequestOp(u *pb.RequestOp) error {
	// TODO: ensure only one of the field is set.
	switch uv := u.Request.(type) {
	case *pb.RequestOp_RequestRange:
		if uv.RequestRange != nil {
			return checkRangeRequest(uv.RequestRange)
		}
	case *pb.RequestOp_RequestPut:
		if uv.RequestPut != nil {
			return checkPutRequest(uv.RequestPut)
		}
	case *pb.RequestOp_RequestDeleteRange:
		if uv.RequestDeleteRange != nil {
			return checkDeleteRequest(uv.RequestDeleteRange)
		}
	default:
		// empty op / nil entry
		return rpctypes.ErrGRPCKeyNotFound
	}
	return nil
}
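
checkRequestDupKeys detects a delete range that overlaps an earlier put with two binary searches over the sorted put keys: if the insertion points for Key and RangeEnd differ, some put key must lie in [Key, RangeEnd). A self-contained sketch of just that test (illustrative, not part of the commit):

package main

import (
	"fmt"
	"sort"
)

// overlaps reports whether any key in sortedPutKeys falls inside the
// half-open delete range [key, rangeEnd), mirroring the lo/hi
// SearchStrings check in checkRequestDupKeys above.
func overlaps(sortedPutKeys []string, key, rangeEnd string) bool {
	lo := sort.SearchStrings(sortedPutKeys, key)
	hi := sort.SearchStrings(sortedPutKeys, rangeEnd)
	return lo != hi // some put key sorts inside the range
}

func main() {
	keys := []string{"a", "c", "e"}
	fmt.Println(overlaps(keys, "b", "d")) // true: "c" is inside [b, d)
	fmt.Println(overlaps(keys, "f", "g")) // false: nothing sorts past "e"
}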

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/lease.go (generated, vendored, new file)
@@ -0,0 +1,123 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"io"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/lease"
	"golang.org/x/net/context"
)

type LeaseServer struct {
	hdr header
	le  etcdserver.Lessor
}

func NewLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
	return &LeaseServer{le: s, hdr: newHeader(s)}
}

func (ls *LeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	resp, err := ls.le.LeaseGrant(ctx, cr)

	if err != nil {
		return nil, togRPCError(err)
	}
	ls.hdr.fill(resp.Header)
	return resp, nil
}

func (ls *LeaseServer) LeaseRevoke(ctx context.Context, rr *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	resp, err := ls.le.LeaseRevoke(ctx, rr)
	if err != nil {
		return nil, togRPCError(err)
	}
	ls.hdr.fill(resp.Header)
	return resp, nil
}

func (ls *LeaseServer) LeaseTimeToLive(ctx context.Context, rr *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	resp, err := ls.le.LeaseTimeToLive(ctx, rr)
	if err != nil && err != lease.ErrLeaseNotFound {
		return nil, togRPCError(err)
	}
	if err == lease.ErrLeaseNotFound {
		resp = &pb.LeaseTimeToLiveResponse{
			Header: &pb.ResponseHeader{},
			ID:     rr.ID,
			TTL:    -1,
		}
	}
	ls.hdr.fill(resp.Header)
	return resp, nil
}

func (ls *LeaseServer) LeaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) (err error) {
	errc := make(chan error, 1)
	go func() {
		errc <- ls.leaseKeepAlive(stream)
	}()
	select {
	case err = <-errc:
	case <-stream.Context().Done():
		// the only server-side cancellation is noleader for now.
		err = stream.Context().Err()
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}
	return err
}

func (ls *LeaseServer) leaseKeepAlive(stream pb.Lease_LeaseKeepAliveServer) error {
	for {
		req, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		// Create header before we sent out the renew request.
		// This can make sure that the revision is strictly smaller or equal to
		// when the keepalive happened at the local server (when the local server is the leader)
		// or remote leader.
		// Without this, a lease might be revoked at rev 3 but client can see the keepalive succeeded
		// at rev 4.
		resp := &pb.LeaseKeepAliveResponse{ID: req.ID, Header: &pb.ResponseHeader{}}
		ls.hdr.fill(resp.Header)

		ttl, err := ls.le.LeaseRenew(stream.Context(), lease.LeaseID(req.ID))
		if err == lease.ErrLeaseNotFound {
			err = nil
			ttl = 0
		}

		if err != nil {
			return togRPCError(err)
		}

		resp.TTL = ttl
		err = stream.Send(resp)
		if err != nil {
			return err
		}
	}
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/maintenance.go (generated, vendored, new file)
@@ -0,0 +1,190 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"crypto/sha256"
	"io"

	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/etcdserver"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/backend"
	"github.com/coreos/etcd/pkg/types"
	"github.com/coreos/etcd/version"
	"golang.org/x/net/context"
)

type KVGetter interface {
	KV() mvcc.ConsistentWatchableKV
}

type BackendGetter interface {
	Backend() backend.Backend
}

type Alarmer interface {
	Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error)
}

type RaftStatusGetter interface {
	Index() uint64
	Term() uint64
	Leader() types.ID
}

type AuthGetter interface {
	AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error)
	AuthStore() auth.AuthStore
}

type maintenanceServer struct {
	rg  RaftStatusGetter
	kg  KVGetter
	bg  BackendGetter
	a   Alarmer
	hdr header
}

func NewMaintenanceServer(s *etcdserver.EtcdServer) pb.MaintenanceServer {
	srv := &maintenanceServer{rg: s, kg: s, bg: s, a: s, hdr: newHeader(s)}
	return &authMaintenanceServer{srv, s}
}

func (ms *maintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	plog.Noticef("starting to defragment the storage backend...")
	err := ms.bg.Backend().Defrag()
	if err != nil {
		plog.Errorf("failed to defragment the storage backend (%v)", err)
		return nil, err
	}
	plog.Noticef("finished defragmenting the storage backend")
	return &pb.DefragmentResponse{}, nil
}

func (ms *maintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
	snap := ms.bg.Backend().Snapshot()
	pr, pw := io.Pipe()

	defer pr.Close()

	go func() {
		snap.WriteTo(pw)
		if err := snap.Close(); err != nil {
			plog.Errorf("error closing snapshot (%v)", err)
		}
		pw.Close()
	}()

	// send file data
	h := sha256.New()
	br := int64(0)
	buf := make([]byte, 32*1024)
	sz := snap.Size()
	for br < sz {
		n, err := io.ReadFull(pr, buf)
		if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
			return togRPCError(err)
		}
		br += int64(n)
		resp := &pb.SnapshotResponse{
			RemainingBytes: uint64(sz - br),
			Blob:           buf[:n],
		}
		if err = srv.Send(resp); err != nil {
			return togRPCError(err)
		}
		h.Write(buf[:n])
	}

	// send sha
	sha := h.Sum(nil)
	hresp := &pb.SnapshotResponse{RemainingBytes: 0, Blob: sha}
	if err := srv.Send(hresp); err != nil {
		return togRPCError(err)
	}

	return nil
}

func (ms *maintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	h, rev, err := ms.kg.KV().Hash()
	if err != nil {
		return nil, togRPCError(err)
	}
	resp := &pb.HashResponse{Header: &pb.ResponseHeader{Revision: rev}, Hash: h}
	ms.hdr.fill(resp.Header)
	return resp, nil
}

func (ms *maintenanceServer) Alarm(ctx context.Context, ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	return ms.a.Alarm(ctx, ar)
}

func (ms *maintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
	resp := &pb.StatusResponse{
		Header:    &pb.ResponseHeader{Revision: ms.hdr.rev()},
		Version:   version.Version,
		DbSize:    ms.bg.Backend().Size(),
		Leader:    uint64(ms.rg.Leader()),
		RaftIndex: ms.rg.Index(),
		RaftTerm:  ms.rg.Term(),
	}
	ms.hdr.fill(resp.Header)
	return resp, nil
}

type authMaintenanceServer struct {
	*maintenanceServer
	ag AuthGetter
}

func (ams *authMaintenanceServer) isAuthenticated(ctx context.Context) error {
	authInfo, err := ams.ag.AuthInfoFromCtx(ctx)
	if err != nil {
		return err
	}

	return ams.ag.AuthStore().IsAdminPermitted(authInfo)
}

func (ams *authMaintenanceServer) Defragment(ctx context.Context, sr *pb.DefragmentRequest) (*pb.DefragmentResponse, error) {
	if err := ams.isAuthenticated(ctx); err != nil {
		return nil, err
	}

	return ams.maintenanceServer.Defragment(ctx, sr)
}

func (ams *authMaintenanceServer) Snapshot(sr *pb.SnapshotRequest, srv pb.Maintenance_SnapshotServer) error {
	if err := ams.isAuthenticated(srv.Context()); err != nil {
		return err
	}

	return ams.maintenanceServer.Snapshot(sr, srv)
}

func (ams *authMaintenanceServer) Hash(ctx context.Context, r *pb.HashRequest) (*pb.HashResponse, error) {
	if err := ams.isAuthenticated(ctx); err != nil {
		return nil, err
	}

	return ams.maintenanceServer.Hash(ctx, r)
}

func (ams *authMaintenanceServer) Status(ctx context.Context, ar *pb.StatusRequest) (*pb.StatusResponse, error) {
	return ams.maintenanceServer.Status(ctx, ar)
}
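
The Snapshot sender above streams raw chunks and then one trailing message whose Blob is the sha256 of everything sent before it. A hypothetical consumer of that framing, reduced to plain values so it runs standalone; snapshotMsg is a stand-in for pb.SnapshotResponse and an assumption, not an etcd type.

package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

// snapshotMsg mirrors the two pb.SnapshotResponse fields the sender uses.
type snapshotMsg struct {
	RemainingBytes uint64
	Blob           []byte
}

// verifySnapshot writes every data chunk to w and checks the final
// message against the sha256 of the data, matching the sender's framing.
func verifySnapshot(msgs []snapshotMsg, w io.Writer) error {
	if len(msgs) == 0 {
		return io.ErrUnexpectedEOF
	}
	h := sha256.New()
	for _, m := range msgs[:len(msgs)-1] {
		h.Write(m.Blob) // hash.Hash.Write never returns an error
		if _, err := w.Write(m.Blob); err != nil {
			return err
		}
	}
	if !bytes.Equal(msgs[len(msgs)-1].Blob, h.Sum(nil)) {
		return fmt.Errorf("snapshot hash mismatch")
	}
	return nil
}

func main() {
	data := []byte("snapshot-bytes")
	sum := sha256.Sum256(data)
	msgs := []snapshotMsg{
		{RemainingBytes: 0, Blob: data},
		{RemainingBytes: 0, Blob: sum[:]}, // trailing sha message
	}
	fmt.Println(verifySnapshot(msgs, os.Stdout)) // data, then <nil>
}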

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/member.go (generated, vendored, new file)
@@ -0,0 +1,103 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"time"

	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/pkg/types"
	"golang.org/x/net/context"
)

type ClusterServer struct {
	cluster   api.Cluster
	server    etcdserver.Server
	raftTimer etcdserver.RaftTimer
}

func NewClusterServer(s *etcdserver.EtcdServer) *ClusterServer {
	return &ClusterServer{
		cluster:   s.Cluster(),
		server:    s,
		raftTimer: s,
	}
}

func (cs *ClusterServer) MemberAdd(ctx context.Context, r *pb.MemberAddRequest) (*pb.MemberAddResponse, error) {
	urls, err := types.NewURLs(r.PeerURLs)
	if err != nil {
		return nil, rpctypes.ErrGRPCMemberBadURLs
	}

	now := time.Now()
	m := membership.NewMember("", urls, "", &now)
	membs, merr := cs.server.AddMember(ctx, *m)
	if merr != nil {
		return nil, togRPCError(merr)
	}

	return &pb.MemberAddResponse{
		Header:  cs.header(),
		Member:  &pb.Member{ID: uint64(m.ID), PeerURLs: m.PeerURLs},
		Members: membersToProtoMembers(membs),
	}, nil
}

func (cs *ClusterServer) MemberRemove(ctx context.Context, r *pb.MemberRemoveRequest) (*pb.MemberRemoveResponse, error) {
	membs, err := cs.server.RemoveMember(ctx, r.ID)
	if err != nil {
		return nil, togRPCError(err)
	}
	return &pb.MemberRemoveResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
}

func (cs *ClusterServer) MemberUpdate(ctx context.Context, r *pb.MemberUpdateRequest) (*pb.MemberUpdateResponse, error) {
	m := membership.Member{
		ID:             types.ID(r.ID),
		RaftAttributes: membership.RaftAttributes{PeerURLs: r.PeerURLs},
	}
	membs, err := cs.server.UpdateMember(ctx, m)
	if err != nil {
		return nil, togRPCError(err)
	}
	return &pb.MemberUpdateResponse{Header: cs.header(), Members: membersToProtoMembers(membs)}, nil
}

func (cs *ClusterServer) MemberList(ctx context.Context, r *pb.MemberListRequest) (*pb.MemberListResponse, error) {
	membs := membersToProtoMembers(cs.cluster.Members())
	return &pb.MemberListResponse{Header: cs.header(), Members: membs}, nil
}

func (cs *ClusterServer) header() *pb.ResponseHeader {
	return &pb.ResponseHeader{ClusterId: uint64(cs.cluster.ID()), MemberId: uint64(cs.server.ID()), RaftTerm: cs.raftTimer.Term()}
}

func membersToProtoMembers(membs []*membership.Member) []*pb.Member {
	protoMembs := make([]*pb.Member, len(membs))
	for i := range membs {
		protoMembs[i] = &pb.Member{
			Name:       membs[i].Name,
			ID:         uint64(membs[i].ID),
			PeerURLs:   membs[i].PeerURLs,
			ClientURLs: membs[i].ClientURLs,
		}
	}
	return protoMembs
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/metrics.go (generated, vendored, new file)
@@ -0,0 +1,38 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import "github.com/prometheus/client_golang/prometheus"

var (
	sentBytes = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "network",
		Name:      "client_grpc_sent_bytes_total",
		Help:      "The total number of bytes sent to grpc clients.",
	})

	receivedBytes = prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "etcd",
		Subsystem: "network",
		Name:      "client_grpc_received_bytes_total",
		Help:      "The total number of bytes received from grpc clients.",
	})
)

func init() {
	prometheus.MustRegister(sentBytes)
	prometheus.MustRegister(receivedBytes)
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/quota.go (generated, vendored, new file)
@@ -0,0 +1,89 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/pkg/types"
	"golang.org/x/net/context"
)

type quotaKVServer struct {
	pb.KVServer
	qa quotaAlarmer
}

type quotaAlarmer struct {
	q  etcdserver.Quota
	a  Alarmer
	id types.ID
}

// check whether request satisfies the quota. If there is not enough space,
// ignore request and raise the free space alarm.
func (qa *quotaAlarmer) check(ctx context.Context, r interface{}) error {
	if qa.q.Available(r) {
		return nil
	}
	req := &pb.AlarmRequest{
		MemberID: uint64(qa.id),
		Action:   pb.AlarmRequest_ACTIVATE,
		Alarm:    pb.AlarmType_NOSPACE,
	}
	qa.a.Alarm(ctx, req)
	return rpctypes.ErrGRPCNoSpace
}

func NewQuotaKVServer(s *etcdserver.EtcdServer) pb.KVServer {
	return &quotaKVServer{
		NewKVServer(s),
		quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
	}
}

func (s *quotaKVServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	if err := s.qa.check(ctx, r); err != nil {
		return nil, err
	}
	return s.KVServer.Put(ctx, r)
}

func (s *quotaKVServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if err := s.qa.check(ctx, r); err != nil {
		return nil, err
	}
	return s.KVServer.Txn(ctx, r)
}

type quotaLeaseServer struct {
	pb.LeaseServer
	qa quotaAlarmer
}

func (s *quotaLeaseServer) LeaseGrant(ctx context.Context, cr *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	if err := s.qa.check(ctx, cr); err != nil {
		return nil, err
	}
	return s.LeaseServer.LeaseGrant(ctx, cr)
}

func NewQuotaLeaseServer(s *etcdserver.EtcdServer) pb.LeaseServer {
	return &quotaLeaseServer{
		NewLeaseServer(s),
		quotaAlarmer{etcdserver.NewBackendQuota(s), s, s.ID()},
	}
}
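
quotaKVServer and quotaLeaseServer use interface embedding as a decorator: embed the wrapped server, override only the methods that need the quota check, and fall through to the embedded implementation for everything else. A self-contained sketch of that pattern with a toy KV (illustrative, not part of the commit):

package main

import "fmt"

type KV interface {
	Put(key, val string) error
	Get(key string) (string, error)
}

type mapKV map[string]string

func (m mapKV) Put(k, v string) error          { m[k] = v; return nil }
func (m mapKV) Get(k string) (string, error)   { return m[k], nil }

// quotaKV embeds the interface; its Put shadows the embedded one,
// while Get still dispatches to the wrapped implementation.
type quotaKV struct {
	KV
	max int
}

func (q quotaKV) Put(k, v string) error {
	if len(v) > q.max {
		return fmt.Errorf("quota exceeded") // analogous to ErrGRPCNoSpace
	}
	return q.KV.Put(k, v) // delegate to the embedded server
}

func main() {
	kv := quotaKV{KV: mapKV{}, max: 8}
	fmt.Println(kv.Put("a", "short"))              // <nil>
	fmt.Println(kv.Put("b", "way too long value")) // quota exceeded
}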

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/util.go (generated, vendored, new file)
@@ -0,0 +1,103 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/mvcc"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
)

func togRPCError(err error) error {
	switch err {
	case membership.ErrIDRemoved:
		return rpctypes.ErrGRPCMemberNotFound
	case membership.ErrIDNotFound:
		return rpctypes.ErrGRPCMemberNotFound
	case membership.ErrIDExists:
		return rpctypes.ErrGRPCMemberExist
	case membership.ErrPeerURLexists:
		return rpctypes.ErrGRPCPeerURLExist
	case etcdserver.ErrNotEnoughStartedMembers:
		return rpctypes.ErrMemberNotEnoughStarted

	case mvcc.ErrCompacted:
		return rpctypes.ErrGRPCCompacted
	case mvcc.ErrFutureRev:
		return rpctypes.ErrGRPCFutureRev
	case etcdserver.ErrRequestTooLarge:
		return rpctypes.ErrGRPCRequestTooLarge
	case etcdserver.ErrNoSpace:
		return rpctypes.ErrGRPCNoSpace
	case etcdserver.ErrTooManyRequests:
		return rpctypes.ErrTooManyRequests

	case etcdserver.ErrNoLeader:
		return rpctypes.ErrGRPCNoLeader
	case etcdserver.ErrStopped:
		return rpctypes.ErrGRPCStopped
	case etcdserver.ErrTimeout:
		return rpctypes.ErrGRPCTimeout
	case etcdserver.ErrTimeoutDueToLeaderFail:
		return rpctypes.ErrGRPCTimeoutDueToLeaderFail
	case etcdserver.ErrTimeoutDueToConnectionLost:
		return rpctypes.ErrGRPCTimeoutDueToConnectionLost
	case etcdserver.ErrUnhealthy:
		return rpctypes.ErrGRPCUnhealthy
	case etcdserver.ErrKeyNotFound:
		return rpctypes.ErrGRPCKeyNotFound

	case lease.ErrLeaseNotFound:
		return rpctypes.ErrGRPCLeaseNotFound
	case lease.ErrLeaseExists:
		return rpctypes.ErrGRPCLeaseExist

	case auth.ErrRootUserNotExist:
		return rpctypes.ErrGRPCRootUserNotExist
	case auth.ErrRootRoleNotExist:
		return rpctypes.ErrGRPCRootRoleNotExist
	case auth.ErrUserAlreadyExist:
		return rpctypes.ErrGRPCUserAlreadyExist
	case auth.ErrUserEmpty:
		return rpctypes.ErrGRPCUserEmpty
	case auth.ErrUserNotFound:
		return rpctypes.ErrGRPCUserNotFound
	case auth.ErrRoleAlreadyExist:
		return rpctypes.ErrGRPCRoleAlreadyExist
	case auth.ErrRoleNotFound:
		return rpctypes.ErrGRPCRoleNotFound
	case auth.ErrAuthFailed:
		return rpctypes.ErrGRPCAuthFailed
	case auth.ErrPermissionDenied:
		return rpctypes.ErrGRPCPermissionDenied
	case auth.ErrRoleNotGranted:
		return rpctypes.ErrGRPCRoleNotGranted
	case auth.ErrPermissionNotGranted:
		return rpctypes.ErrGRPCPermissionNotGranted
	case auth.ErrAuthNotEnabled:
		return rpctypes.ErrGRPCAuthNotEnabled
	case auth.ErrInvalidAuthToken:
		return rpctypes.ErrGRPCInvalidAuthToken
	case auth.ErrInvalidAuthMgmt:
		return rpctypes.ErrGRPCInvalidAuthMgmt
	default:
		return grpc.Errorf(codes.Unknown, err.Error())
	}
}

vendor/github.com/coreos/etcd/etcdserver/api/v3rpc/watch.go (generated, vendored, new file)
@@ -0,0 +1,426 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v3rpc

import (
	"io"
	"sync"
	"time"

	"golang.org/x/net/context"

	"github.com/coreos/etcd/auth"
	"github.com/coreos/etcd/etcdserver"
	"github.com/coreos/etcd/etcdserver/api/v3rpc/rpctypes"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/mvcc/mvccpb"
)

type watchServer struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer
	watchable mvcc.WatchableKV

	ag AuthGetter
}

func NewWatchServer(s *etcdserver.EtcdServer) pb.WatchServer {
	return &watchServer{
		clusterID: int64(s.Cluster().ID()),
		memberID:  int64(s.ID()),
		raftTimer: s,
		watchable: s.Watchable(),
		ag:        s,
	}
}

var (
	// External test can read this with GetProgressReportInterval()
	// and change this to a small value to finish fast with
	// SetProgressReportInterval().
	progressReportInterval   = 10 * time.Minute
	progressReportIntervalMu sync.RWMutex
)

func GetProgressReportInterval() time.Duration {
	progressReportIntervalMu.RLock()
	defer progressReportIntervalMu.RUnlock()
	return progressReportInterval
}

func SetProgressReportInterval(newTimeout time.Duration) {
	progressReportIntervalMu.Lock()
	defer progressReportIntervalMu.Unlock()
	progressReportInterval = newTimeout
}

const (
	// We send ctrl response inside the read loop. We do not want
	// send to block read, but we still want ctrl response we sent to
	// be serialized. Thus we use a buffered chan to solve the problem.
	// A small buffer should be OK for most cases, since we expect the
	// ctrl requests are infrequent.
	ctrlStreamBufLen = 16
)

// serverWatchStream is an etcd server side stream. It receives requests
// from client side gRPC stream. It receives watch events from mvcc.WatchStream,
// and creates responses that forwarded to gRPC stream.
// It also forwards control message like watch created and canceled.
type serverWatchStream struct {
	clusterID int64
	memberID  int64
	raftTimer etcdserver.RaftTimer

	watchable mvcc.WatchableKV

	gRPCStream  pb.Watch_WatchServer
	watchStream mvcc.WatchStream
	ctrlStream  chan *pb.WatchResponse

	// mu protects progress, prevKV
	mu sync.Mutex
	// progress tracks the watchID that stream might need to send
	// progress to.
	// TODO: combine progress and prevKV into a single struct?
	progress map[mvcc.WatchID]bool
	prevKV   map[mvcc.WatchID]bool

	// closec indicates the stream is closed.
	closec chan struct{}

	// wg waits for the send loop to complete
	wg sync.WaitGroup

	ag AuthGetter
}

func (ws *watchServer) Watch(stream pb.Watch_WatchServer) (err error) {
	sws := serverWatchStream{
		clusterID: ws.clusterID,
		memberID:  ws.memberID,
		raftTimer: ws.raftTimer,

		watchable: ws.watchable,

		gRPCStream:  stream,
		watchStream: ws.watchable.NewWatchStream(),
		// chan for sending control response like watcher created and canceled.
		ctrlStream: make(chan *pb.WatchResponse, ctrlStreamBufLen),
		progress:   make(map[mvcc.WatchID]bool),
		prevKV:     make(map[mvcc.WatchID]bool),
		closec:     make(chan struct{}),

		ag: ws.ag,
	}

	sws.wg.Add(1)
	go func() {
		sws.sendLoop()
		sws.wg.Done()
	}()

	errc := make(chan error, 1)
	// Ideally recvLoop would also use sws.wg to signal its completion
	// but when stream.Context().Done() is closed, the stream's recv
	// may continue to block since it uses a different context, leading to
	// deadlock when calling sws.close().
	go func() {
		if rerr := sws.recvLoop(); rerr != nil {
			errc <- rerr
		}
	}()
	select {
	case err = <-errc:
		close(sws.ctrlStream)
	case <-stream.Context().Done():
		err = stream.Context().Err()
		// the only server-side cancellation is noleader for now.
		if err == context.Canceled {
			err = rpctypes.ErrGRPCNoLeader
		}
	}
	sws.close()
	return err
}

func (sws *serverWatchStream) isWatchPermitted(wcr *pb.WatchCreateRequest) bool {
	authInfo, err := sws.ag.AuthInfoFromCtx(sws.gRPCStream.Context())
	if err != nil {
		return false
	}
	if authInfo == nil {
		// if auth is enabled, IsRangePermitted() can cause an error
		authInfo = &auth.AuthInfo{}
	}

	return sws.ag.AuthStore().IsRangePermitted(authInfo, wcr.Key, wcr.RangeEnd) == nil
}

func (sws *serverWatchStream) recvLoop() error {
	for {
		req, err := sws.gRPCStream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}

		switch uv := req.RequestUnion.(type) {
		case *pb.WatchRequest_CreateRequest:
			if uv.CreateRequest == nil {
				break
			}

			creq := uv.CreateRequest
|
||||
if len(creq.Key) == 0 {
|
||||
// \x00 is the smallest key
|
||||
creq.Key = []byte{0}
|
||||
}
|
||||
if len(creq.RangeEnd) == 0 {
|
||||
// force nil since watchstream.Watch distinguishes
|
||||
// between nil and []byte{} for single key / >=
|
||||
creq.RangeEnd = nil
|
||||
}
|
||||
if len(creq.RangeEnd) == 1 && creq.RangeEnd[0] == 0 {
|
||||
// support >= key queries
|
||||
creq.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
if !sws.isWatchPermitted(creq) {
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(sws.watchStream.Rev()),
|
||||
WatchId: -1,
|
||||
Canceled: true,
|
||||
Created: true,
|
||||
CancelReason: rpctypes.ErrGRPCPermissionDenied.Error(),
|
||||
}
|
||||
|
||||
select {
|
||||
case sws.ctrlStream <- wr:
|
||||
case <-sws.closec:
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
filters := FiltersFromRequest(creq)
|
||||
|
||||
wsrev := sws.watchStream.Rev()
|
||||
rev := creq.StartRevision
|
||||
if rev == 0 {
|
||||
rev = wsrev + 1
|
||||
}
|
||||
id := sws.watchStream.Watch(creq.Key, creq.RangeEnd, rev, filters...)
|
||||
if id != -1 {
|
||||
sws.mu.Lock()
|
||||
if creq.ProgressNotify {
|
||||
sws.progress[id] = true
|
||||
}
|
||||
if creq.PrevKv {
|
||||
sws.prevKV[id] = true
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
}
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(wsrev),
|
||||
WatchId: int64(id),
|
||||
Created: true,
|
||||
Canceled: id == -1,
|
||||
}
|
||||
select {
|
||||
case sws.ctrlStream <- wr:
|
||||
case <-sws.closec:
|
||||
return nil
|
||||
}
|
||||
case *pb.WatchRequest_CancelRequest:
|
||||
if uv.CancelRequest != nil {
|
||||
id := uv.CancelRequest.WatchId
|
||||
err := sws.watchStream.Cancel(mvcc.WatchID(id))
|
||||
if err == nil {
|
||||
sws.ctrlStream <- &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(sws.watchStream.Rev()),
|
||||
WatchId: id,
|
||||
Canceled: true,
|
||||
}
|
||||
sws.mu.Lock()
|
||||
delete(sws.progress, mvcc.WatchID(id))
|
||||
delete(sws.prevKV, mvcc.WatchID(id))
|
||||
sws.mu.Unlock()
|
||||
}
|
||||
}
|
||||
default:
|
||||
// we probably should not shutdown the entire stream when
|
||||
// receive an valid command.
|
||||
// so just do nothing instead.
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) sendLoop() {
|
||||
// watch ids that are currently active
|
||||
ids := make(map[mvcc.WatchID]struct{})
|
||||
// watch responses pending on a watch id creation message
|
||||
pending := make(map[mvcc.WatchID][]*pb.WatchResponse)
|
||||
|
||||
interval := GetProgressReportInterval()
|
||||
progressTicker := time.NewTicker(interval)
|
||||
|
||||
defer func() {
|
||||
progressTicker.Stop()
|
||||
// drain the chan to clean up pending events
|
||||
for ws := range sws.watchStream.Chan() {
|
||||
mvcc.ReportEventReceived(len(ws.Events))
|
||||
}
|
||||
for _, wrs := range pending {
|
||||
for _, ws := range wrs {
|
||||
mvcc.ReportEventReceived(len(ws.Events))
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
for {
|
||||
select {
|
||||
case wresp, ok := <-sws.watchStream.Chan():
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
// TODO: evs is []mvccpb.Event type
|
||||
// either return []*mvccpb.Event from the mvcc package
|
||||
// or define protocol buffer with []mvccpb.Event.
|
||||
evs := wresp.Events
|
||||
events := make([]*mvccpb.Event, len(evs))
|
||||
sws.mu.Lock()
|
||||
needPrevKV := sws.prevKV[wresp.WatchID]
|
||||
sws.mu.Unlock()
|
||||
for i := range evs {
|
||||
events[i] = &evs[i]
|
||||
|
||||
if needPrevKV {
|
||||
opt := mvcc.RangeOptions{Rev: evs[i].Kv.ModRevision - 1}
|
||||
r, err := sws.watchable.Range(evs[i].Kv.Key, nil, opt)
|
||||
if err == nil && len(r.KVs) != 0 {
|
||||
events[i].PrevKv = &(r.KVs[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
wr := &pb.WatchResponse{
|
||||
Header: sws.newResponseHeader(wresp.Revision),
|
||||
WatchId: int64(wresp.WatchID),
|
||||
Events: events,
|
||||
CompactRevision: wresp.CompactRevision,
|
||||
}
|
||||
|
||||
if _, hasId := ids[wresp.WatchID]; !hasId {
|
||||
// buffer if id not yet announced
|
||||
wrs := append(pending[wresp.WatchID], wr)
|
||||
pending[wresp.WatchID] = wrs
|
||||
continue
|
||||
}
|
||||
|
||||
mvcc.ReportEventReceived(len(evs))
|
||||
if err := sws.gRPCStream.Send(wr); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
sws.mu.Lock()
|
||||
if len(evs) > 0 && sws.progress[wresp.WatchID] {
|
||||
// elide next progress update if sent a key update
|
||||
sws.progress[wresp.WatchID] = false
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
|
||||
case c, ok := <-sws.ctrlStream:
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
|
||||
if err := sws.gRPCStream.Send(c); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// track id creation
|
||||
wid := mvcc.WatchID(c.WatchId)
|
||||
if c.Canceled {
|
||||
delete(ids, wid)
|
||||
continue
|
||||
}
|
||||
if c.Created {
|
||||
// flush buffered events
|
||||
ids[wid] = struct{}{}
|
||||
for _, v := range pending[wid] {
|
||||
mvcc.ReportEventReceived(len(v.Events))
|
||||
if err := sws.gRPCStream.Send(v); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
delete(pending, wid)
|
||||
}
|
||||
case <-progressTicker.C:
|
||||
sws.mu.Lock()
|
||||
for id, ok := range sws.progress {
|
||||
if ok {
|
||||
sws.watchStream.RequestProgress(id)
|
||||
}
|
||||
sws.progress[id] = true
|
||||
}
|
||||
sws.mu.Unlock()
|
||||
case <-sws.closec:
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) close() {
|
||||
sws.watchStream.Close()
|
||||
close(sws.closec)
|
||||
sws.wg.Wait()
|
||||
}
|
||||
|
||||
func (sws *serverWatchStream) newResponseHeader(rev int64) *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{
|
||||
ClusterId: uint64(sws.clusterID),
|
||||
MemberId: uint64(sws.memberID),
|
||||
Revision: rev,
|
||||
RaftTerm: sws.raftTimer.Term(),
|
||||
}
|
||||
}
|
||||
|
||||
func filterNoDelete(e mvccpb.Event) bool {
|
||||
return e.Type == mvccpb.DELETE
|
||||
}
|
||||
|
||||
func filterNoPut(e mvccpb.Event) bool {
|
||||
return e.Type == mvccpb.PUT
|
||||
}
|
||||
|
||||
func FiltersFromRequest(creq *pb.WatchCreateRequest) []mvcc.FilterFunc {
|
||||
filters := make([]mvcc.FilterFunc, 0, len(creq.Filters))
|
||||
for _, ft := range creq.Filters {
|
||||
switch ft {
|
||||
case pb.WatchCreateRequest_NOPUT:
|
||||
filters = append(filters, filterNoPut)
|
||||
case pb.WatchCreateRequest_NODELETE:
|
||||
filters = append(filters, filterNoDelete)
|
||||
default:
|
||||
}
|
||||
}
|
||||
return filters
|
||||
}
|
878
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
Normal file
878
vendor/github.com/coreos/etcd/etcdserver/apply.go
generated
vendored
Normal file
|
@ -0,0 +1,878 @@
|
|||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"sort"
|
||||
"time"
|
||||
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/mvccpb"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"golang.org/x/net/context"
|
||||
)
|
||||
|
||||
const (
|
||||
warnApplyDuration = 100 * time.Millisecond
|
||||
)
|
||||
|
||||
type applyResult struct {
|
||||
resp proto.Message
|
||||
err error
|
||||
// physc signals the physical effect of the request has completed in addition
|
||||
// to being logically reflected by the node. Currently only used for
|
||||
// Compaction requests.
|
||||
physc <-chan struct{}
|
||||
}
|
||||
|
||||
// applierV3 is the interface for processing V3 raft messages
|
||||
type applierV3 interface {
|
||||
Apply(r *pb.InternalRaftRequest) *applyResult
|
||||
|
||||
Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error)
|
||||
Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error)
|
||||
DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
|
||||
Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error)
|
||||
Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error)
|
||||
|
||||
LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
|
||||
LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)
|
||||
|
||||
Alarm(*pb.AlarmRequest) (*pb.AlarmResponse, error)
|
||||
|
||||
Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error)
|
||||
|
||||
AuthEnable() (*pb.AuthEnableResponse, error)
|
||||
AuthDisable() (*pb.AuthDisableResponse, error)
|
||||
|
||||
UserAdd(ua *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
|
||||
UserDelete(ua *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
|
||||
UserChangePassword(ua *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
|
||||
UserGrantRole(ua *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
|
||||
UserGet(ua *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
|
||||
UserRevokeRole(ua *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
|
||||
RoleAdd(ua *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
|
||||
RoleGrantPermission(ua *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
|
||||
RoleGet(ua *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
|
||||
RoleRevokePermission(ua *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
|
||||
RoleDelete(ua *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
|
||||
UserList(ua *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
|
||||
RoleList(ua *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
|
||||
}
|
||||
|
||||
type applierV3backend struct {
|
||||
s *EtcdServer
|
||||
}
|
||||
|
||||
func (s *EtcdServer) newApplierV3() applierV3 {
|
||||
return newAuthApplierV3(
|
||||
s.AuthStore(),
|
||||
newQuotaApplierV3(s, &applierV3backend{s}),
|
||||
)
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
ar := &applyResult{}
|
||||
|
||||
// call into a.s.applyV3.F instead of a.F so upper appliers can check individual calls
|
||||
switch {
|
||||
case r.Range != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Range(nil, r.Range)
|
||||
case r.Put != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Put(nil, r.Put)
|
||||
case r.DeleteRange != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.DeleteRange(nil, r.DeleteRange)
|
||||
case r.Txn != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Txn(r.Txn)
|
||||
case r.Compaction != nil:
|
||||
ar.resp, ar.physc, ar.err = a.s.applyV3.Compaction(r.Compaction)
|
||||
case r.LeaseGrant != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseGrant(r.LeaseGrant)
|
||||
case r.LeaseRevoke != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.LeaseRevoke(r.LeaseRevoke)
|
||||
case r.Alarm != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Alarm(r.Alarm)
|
||||
case r.Authenticate != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.Authenticate(r.Authenticate)
|
||||
case r.AuthEnable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthEnable()
|
||||
case r.AuthDisable != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.AuthDisable()
|
||||
case r.AuthUserAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserAdd(r.AuthUserAdd)
|
||||
case r.AuthUserDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserDelete(r.AuthUserDelete)
|
||||
case r.AuthUserChangePassword != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserChangePassword(r.AuthUserChangePassword)
|
||||
case r.AuthUserGrantRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGrantRole(r.AuthUserGrantRole)
|
||||
case r.AuthUserGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserGet(r.AuthUserGet)
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserRevokeRole(r.AuthUserRevokeRole)
|
||||
case r.AuthRoleAdd != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleAdd(r.AuthRoleAdd)
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGrantPermission(r.AuthRoleGrantPermission)
|
||||
case r.AuthRoleGet != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleGet(r.AuthRoleGet)
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleRevokePermission(r.AuthRoleRevokePermission)
|
||||
case r.AuthRoleDelete != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleDelete(r.AuthRoleDelete)
|
||||
case r.AuthUserList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.UserList(r.AuthUserList)
|
||||
case r.AuthRoleList != nil:
|
||||
ar.resp, ar.err = a.s.applyV3.RoleList(r.AuthRoleList)
|
||||
default:
|
||||
panic("not implemented")
|
||||
}
|
||||
return ar
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (resp *pb.PutResponse, err error) {
|
||||
resp = &pb.PutResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
val, leaseID := p.Value, lease.LeaseID(p.Lease)
|
||||
if txn == nil {
|
||||
if leaseID != lease.NoLease {
|
||||
if l := a.s.lessor.Lookup(leaseID); l == nil {
|
||||
return nil, lease.ErrLeaseNotFound
|
||||
}
|
||||
}
|
||||
txn = a.s.KV().Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
var rr *mvcc.RangeResult
|
||||
if p.IgnoreValue || p.IgnoreLease || p.PrevKv {
|
||||
rr, err = txn.Range(p.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue || p.IgnoreLease {
|
||||
if rr == nil || len(rr.KVs) == 0 {
|
||||
// ignore_{lease,value} flag expects previous key-value pair
|
||||
return nil, ErrKeyNotFound
|
||||
}
|
||||
}
|
||||
if p.IgnoreValue {
|
||||
val = rr.KVs[0].Value
|
||||
}
|
||||
if p.IgnoreLease {
|
||||
leaseID = lease.LeaseID(rr.KVs[0].Lease)
|
||||
}
|
||||
if p.PrevKv {
|
||||
if rr != nil && len(rr.KVs) != 0 {
|
||||
resp.PrevKv = &rr.KVs[0]
|
||||
}
|
||||
}
|
||||
|
||||
resp.Header.Revision = txn.Put(p.Key, val, leaseID)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) DeleteRange(txn mvcc.TxnWrite, dr *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
resp := &pb.DeleteRangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Write()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(dr.RangeEnd) {
|
||||
dr.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
if dr.PrevKv {
|
||||
rr, err := txn.Range(dr.Key, dr.RangeEnd, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if rr != nil {
|
||||
for i := range rr.KVs {
|
||||
resp.PrevKvs = append(resp.PrevKvs, &rr.KVs[i])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp.Deleted, resp.Header.Revision = txn.DeleteRange(dr.Key, dr.RangeEnd)
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
resp := &pb.RangeResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
|
||||
if txn == nil {
|
||||
txn = a.s.kv.Read()
|
||||
defer txn.End()
|
||||
}
|
||||
|
||||
if isGteRange(r.RangeEnd) {
|
||||
r.RangeEnd = []byte{}
|
||||
}
|
||||
|
||||
limit := r.Limit
|
||||
if r.SortOrder != pb.RangeRequest_NONE ||
|
||||
r.MinModRevision != 0 || r.MaxModRevision != 0 ||
|
||||
r.MinCreateRevision != 0 || r.MaxCreateRevision != 0 {
|
||||
// fetch everything; sort and truncate afterwards
|
||||
limit = 0
|
||||
}
|
||||
if limit > 0 {
|
||||
// fetch one extra for 'more' flag
|
||||
limit = limit + 1
|
||||
}
|
||||
|
||||
ro := mvcc.RangeOptions{
|
||||
Limit: limit,
|
||||
Rev: r.Revision,
|
||||
Count: r.CountOnly,
|
||||
}
|
||||
|
||||
rr, err := txn.Range(r.Key, r.RangeEnd, ro)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if r.MaxModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision > r.MaxModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinModRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.ModRevision < r.MinModRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MaxCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision > r.MaxCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
if r.MinCreateRevision != 0 {
|
||||
f := func(kv *mvccpb.KeyValue) bool { return kv.CreateRevision < r.MinCreateRevision }
|
||||
pruneKVs(rr, f)
|
||||
}
|
||||
|
||||
sortOrder := r.SortOrder
|
||||
if r.SortTarget != pb.RangeRequest_KEY && sortOrder == pb.RangeRequest_NONE {
|
||||
// Since current mvcc.Range implementation returns results
|
||||
// sorted by keys in lexiographically ascending order,
|
||||
// sort ASCEND by default only when target is not 'KEY'
|
||||
sortOrder = pb.RangeRequest_ASCEND
|
||||
}
|
||||
if sortOrder != pb.RangeRequest_NONE {
|
||||
var sorter sort.Interface
|
||||
switch {
|
||||
case r.SortTarget == pb.RangeRequest_KEY:
|
||||
sorter = &kvSortByKey{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VERSION:
|
||||
sorter = &kvSortByVersion{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_CREATE:
|
||||
sorter = &kvSortByCreate{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_MOD:
|
||||
sorter = &kvSortByMod{&kvSort{rr.KVs}}
|
||||
case r.SortTarget == pb.RangeRequest_VALUE:
|
||||
sorter = &kvSortByValue{&kvSort{rr.KVs}}
|
||||
}
|
||||
switch {
|
||||
case sortOrder == pb.RangeRequest_ASCEND:
|
||||
sort.Sort(sorter)
|
||||
case sortOrder == pb.RangeRequest_DESCEND:
|
||||
sort.Sort(sort.Reverse(sorter))
|
||||
}
|
||||
}
|
||||
|
||||
if r.Limit > 0 && len(rr.KVs) > int(r.Limit) {
|
||||
rr.KVs = rr.KVs[:r.Limit]
|
||||
resp.More = true
|
||||
}
|
||||
|
||||
resp.Header.Revision = rr.Rev
|
||||
resp.Count = int64(rr.Count)
|
||||
for i := range rr.KVs {
|
||||
if r.KeysOnly {
|
||||
rr.KVs[i].Value = nil
|
||||
}
|
||||
resp.Kvs = append(resp.Kvs, &rr.KVs[i])
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
isWrite := !isTxnReadonly(rt)
|
||||
txn := mvcc.NewReadOnlyTxnWrite(a.s.KV().Read())
|
||||
|
||||
reqs, ok := a.compareToOps(txn, rt)
|
||||
if isWrite {
|
||||
if err := a.checkRequestPut(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
if err := checkRequestRange(txn, reqs); err != nil {
|
||||
txn.End()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
resps := make([]*pb.ResponseOp, len(reqs))
|
||||
txnResp := &pb.TxnResponse{
|
||||
Responses: resps,
|
||||
Succeeded: ok,
|
||||
Header: &pb.ResponseHeader{},
|
||||
}
|
||||
|
||||
// When executing mutable txn ops, etcd must hold the txn lock so
|
||||
// readers do not see any intermediate results. Since writes are
|
||||
// serialized on the raft loop, the revision in the read view will
|
||||
// be the revision of the write txn.
|
||||
if isWrite {
|
||||
txn.End()
|
||||
txn = a.s.KV().Write()
|
||||
}
|
||||
for i := range reqs {
|
||||
resps[i] = a.applyUnion(txn, reqs[i])
|
||||
}
|
||||
rev := txn.Rev()
|
||||
if len(txn.Changes()) != 0 {
|
||||
rev++
|
||||
}
|
||||
txn.End()
|
||||
|
||||
txnResp.Header.Revision = rev
|
||||
return txnResp, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) compareToOps(rv mvcc.ReadView, rt *pb.TxnRequest) ([]*pb.RequestOp, bool) {
|
||||
for _, c := range rt.Compare {
|
||||
if !applyCompare(rv, c) {
|
||||
return rt.Failure, false
|
||||
}
|
||||
}
|
||||
return rt.Success, true
|
||||
}
|
||||
|
||||
// applyCompare applies the compare request.
|
||||
// If the comparison succeeds, it returns true. Otherwise, returns false.
|
||||
func applyCompare(rv mvcc.ReadView, c *pb.Compare) bool {
|
||||
rr, err := rv.Range(c.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
var ckv mvccpb.KeyValue
|
||||
if len(rr.KVs) != 0 {
|
||||
ckv = rr.KVs[0]
|
||||
} else {
|
||||
// Use the zero value of ckv normally. However...
|
||||
if c.Target == pb.Compare_VALUE {
|
||||
// Always fail if we're comparing a value on a key that doesn't exist.
|
||||
// We can treat non-existence as the empty set explicitly, such that
|
||||
// even a key with a value of length 0 bytes is still a real key
|
||||
// that was written that way
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// -1 is less, 0 is equal, 1 is greater
|
||||
var result int
|
||||
switch c.Target {
|
||||
case pb.Compare_VALUE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Value)
|
||||
if tv != nil {
|
||||
result = bytes.Compare(ckv.Value, tv.Value)
|
||||
}
|
||||
case pb.Compare_CREATE:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_CreateRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.CreateRevision, tv.CreateRevision)
|
||||
}
|
||||
|
||||
case pb.Compare_MOD:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_ModRevision)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.ModRevision, tv.ModRevision)
|
||||
}
|
||||
case pb.Compare_VERSION:
|
||||
tv, _ := c.TargetUnion.(*pb.Compare_Version)
|
||||
if tv != nil {
|
||||
result = compareInt64(ckv.Version, tv.Version)
|
||||
}
|
||||
}
|
||||
|
||||
switch c.Result {
|
||||
case pb.Compare_EQUAL:
|
||||
return result == 0
|
||||
case pb.Compare_NOT_EQUAL:
|
||||
return result != 0
|
||||
case pb.Compare_GREATER:
|
||||
return result > 0
|
||||
case pb.Compare_LESS:
|
||||
return result < 0
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (a *applierV3backend) applyUnion(txn mvcc.TxnWrite, union *pb.RequestOp) *pb.ResponseOp {
|
||||
switch tv := union.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if tv.RequestRange != nil {
|
||||
resp, err := a.Range(txn, tv.RequestRange)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseRange{ResponseRange: resp}}
|
||||
}
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if tv.RequestPut != nil {
|
||||
resp, err := a.Put(txn, tv.RequestPut)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponsePut{ResponsePut: resp}}
|
||||
}
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if tv.RequestDeleteRange != nil {
|
||||
resp, err := a.DeleteRange(txn, tv.RequestDeleteRange)
|
||||
if err != nil {
|
||||
plog.Panicf("unexpected error during txn: %v", err)
|
||||
}
|
||||
return &pb.ResponseOp{Response: &pb.ResponseOp_ResponseDeleteRange{ResponseDeleteRange: resp}}
|
||||
}
|
||||
default:
|
||||
// empty union
|
||||
return nil
|
||||
}
|
||||
return nil
|
||||
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Compaction(compaction *pb.CompactionRequest) (*pb.CompactionResponse, <-chan struct{}, error) {
|
||||
resp := &pb.CompactionResponse{}
|
||||
resp.Header = &pb.ResponseHeader{}
|
||||
ch, err := a.s.KV().Compact(compaction.Revision)
|
||||
if err != nil {
|
||||
return nil, ch, err
|
||||
}
|
||||
// get the current revision. which key to get is not important.
|
||||
rr, _ := a.s.KV().Range([]byte("compaction"), nil, mvcc.RangeOptions{})
|
||||
resp.Header.Revision = rr.Rev
|
||||
return resp, ch, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
l, err := a.s.lessor.Grant(lease.LeaseID(lc.ID), lc.TTL)
|
||||
resp := &pb.LeaseGrantResponse{}
|
||||
if err == nil {
|
||||
resp.ID = int64(l.ID)
|
||||
resp.TTL = l.TTL()
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) LeaseRevoke(lc *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
|
||||
err := a.s.lessor.Revoke(lease.LeaseID(lc.ID))
|
||||
return &pb.LeaseRevokeResponse{Header: newHeader(a.s)}, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Alarm(ar *pb.AlarmRequest) (*pb.AlarmResponse, error) {
|
||||
resp := &pb.AlarmResponse{}
|
||||
oldCount := len(a.s.alarmStore.Get(ar.Alarm))
|
||||
|
||||
switch ar.Action {
|
||||
case pb.AlarmRequest_GET:
|
||||
resp.Alarms = a.s.alarmStore.Get(ar.Alarm)
|
||||
case pb.AlarmRequest_ACTIVATE:
|
||||
m := a.s.alarmStore.Activate(types.ID(ar.MemberID), ar.Alarm)
|
||||
if m == nil {
|
||||
break
|
||||
}
|
||||
resp.Alarms = append(resp.Alarms, m)
|
||||
activated := oldCount == 0 && len(a.s.alarmStore.Get(m.Alarm)) == 1
|
||||
if !activated {
|
||||
break
|
||||
}
|
||||
|
||||
switch m.Alarm {
|
||||
case pb.AlarmType_NOSPACE:
|
||||
plog.Warningf("alarm raised %+v", m)
|
||||
a.s.applyV3 = newApplierV3Capped(a)
|
||||
default:
|
||||
plog.Errorf("unimplemented alarm activation (%+v)", m)
|
||||
}
|
||||
case pb.AlarmRequest_DEACTIVATE:
|
||||
m := a.s.alarmStore.Deactivate(types.ID(ar.MemberID), ar.Alarm)
|
||||
if m == nil {
|
||||
break
|
||||
}
|
||||
resp.Alarms = append(resp.Alarms, m)
|
||||
deactivated := oldCount > 0 && len(a.s.alarmStore.Get(ar.Alarm)) == 0
|
||||
if !deactivated {
|
||||
break
|
||||
}
|
||||
|
||||
switch m.Alarm {
|
||||
case pb.AlarmType_NOSPACE:
|
||||
plog.Infof("alarm disarmed %+v", ar)
|
||||
a.s.applyV3 = a.s.newApplierV3()
|
||||
default:
|
||||
plog.Errorf("unimplemented alarm deactivation (%+v)", m)
|
||||
}
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
type applierV3Capped struct {
|
||||
applierV3
|
||||
q backendQuota
|
||||
}
|
||||
|
||||
// newApplierV3Capped creates an applyV3 that will reject Puts and transactions
|
||||
// with Puts so that the number of keys in the store is capped.
|
||||
func newApplierV3Capped(base applierV3) applierV3 { return &applierV3Capped{applierV3: base} }
|
||||
|
||||
func (a *applierV3Capped) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
|
||||
func (a *applierV3Capped) Txn(r *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if a.q.Cost(r) > 0 {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
return a.applierV3.Txn(r)
|
||||
}
|
||||
|
||||
func (a *applierV3Capped) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
return nil, ErrNoSpace
|
||||
}
|
||||
|
||||
func (a *applierV3backend) AuthEnable() (*pb.AuthEnableResponse, error) {
|
||||
err := a.s.AuthStore().AuthEnable()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &pb.AuthEnableResponse{Header: newHeader(a.s)}, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) AuthDisable() (*pb.AuthDisableResponse, error) {
|
||||
a.s.AuthStore().AuthDisable()
|
||||
return &pb.AuthDisableResponse{Header: newHeader(a.s)}, nil
|
||||
}
|
||||
|
||||
func (a *applierV3backend) Authenticate(r *pb.InternalAuthenticateRequest) (*pb.AuthenticateResponse, error) {
|
||||
ctx := context.WithValue(context.WithValue(a.s.ctx, "index", a.s.consistIndex.ConsistentIndex()), "simpleToken", r.SimpleToken)
|
||||
resp, err := a.s.AuthStore().Authenticate(ctx, r.Name, r.Password)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserAdd(r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserAdd(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserDelete(r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserDelete(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserChangePassword(r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserChangePassword(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserGrantRole(r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserGrantRole(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserGet(r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserGet(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserRevokeRole(r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserRevokeRole(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleAdd(r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleAdd(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleGrantPermission(r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleGrantPermission(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleGet(r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleGet(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleRevokePermission(r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleRevokePermission(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleDelete(r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleDelete(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) UserList(r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
|
||||
resp, err := a.s.AuthStore().UserList(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *applierV3backend) RoleList(r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
|
||||
resp, err := a.s.AuthStore().RoleList(r)
|
||||
if resp != nil {
|
||||
resp.Header = newHeader(a.s)
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type quotaApplierV3 struct {
|
||||
applierV3
|
||||
q Quota
|
||||
}
|
||||
|
||||
func newQuotaApplierV3(s *EtcdServer, app applierV3) applierV3 {
|
||||
return "aApplierV3{app, NewBackendQuota(s)}
|
||||
}
|
||||
|
||||
func (a *quotaApplierV3) Put(txn mvcc.TxnWrite, p *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
ok := a.q.Available(p)
|
||||
resp, err := a.applierV3.Put(txn, p)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *quotaApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
ok := a.q.Available(rt)
|
||||
resp, err := a.applierV3.Txn(rt)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
func (a *quotaApplierV3) LeaseGrant(lc *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
|
||||
ok := a.q.Available(lc)
|
||||
resp, err := a.applierV3.LeaseGrant(lc)
|
||||
if err == nil && !ok {
|
||||
err = ErrNoSpace
|
||||
}
|
||||
return resp, err
|
||||
}
|
||||
|
||||
type kvSort struct{ kvs []mvccpb.KeyValue }
|
||||
|
||||
func (s *kvSort) Swap(i, j int) {
|
||||
t := s.kvs[i]
|
||||
s.kvs[i] = s.kvs[j]
|
||||
s.kvs[j] = t
|
||||
}
|
||||
func (s *kvSort) Len() int { return len(s.kvs) }
|
||||
|
||||
type kvSortByKey struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByKey) Less(i, j int) bool {
|
||||
return bytes.Compare(s.kvs[i].Key, s.kvs[j].Key) < 0
|
||||
}
|
||||
|
||||
type kvSortByVersion struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByVersion) Less(i, j int) bool {
|
||||
return (s.kvs[i].Version - s.kvs[j].Version) < 0
|
||||
}
|
||||
|
||||
type kvSortByCreate struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByCreate) Less(i, j int) bool {
|
||||
return (s.kvs[i].CreateRevision - s.kvs[j].CreateRevision) < 0
|
||||
}
|
||||
|
||||
type kvSortByMod struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByMod) Less(i, j int) bool {
|
||||
return (s.kvs[i].ModRevision - s.kvs[j].ModRevision) < 0
|
||||
}
|
||||
|
||||
type kvSortByValue struct{ *kvSort }
|
||||
|
||||
func (s *kvSortByValue) Less(i, j int) bool {
|
||||
return bytes.Compare(s.kvs[i].Value, s.kvs[j].Value) < 0
|
||||
}
|
||||
|
||||
func (a *applierV3backend) checkRequestPut(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestPut)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
preq := tv.RequestPut
|
||||
if preq == nil {
|
||||
continue
|
||||
}
|
||||
if preq.IgnoreValue || preq.IgnoreLease {
|
||||
// expects previous key-value, error if not exist
|
||||
rr, err := rv.Range(preq.Key, nil, mvcc.RangeOptions{})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if rr == nil || len(rr.KVs) == 0 {
|
||||
return ErrKeyNotFound
|
||||
}
|
||||
}
|
||||
if lease.LeaseID(preq.Lease) == lease.NoLease {
|
||||
continue
|
||||
}
|
||||
if l := a.s.lessor.Lookup(lease.LeaseID(preq.Lease)); l == nil {
|
||||
return lease.ErrLeaseNotFound
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkRequestRange(rv mvcc.ReadView, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
tv, ok := requ.Request.(*pb.RequestOp_RequestRange)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
greq := tv.RequestRange
|
||||
if greq == nil || greq.Revision == 0 {
|
||||
continue
|
||||
}
|
||||
|
||||
if greq.Revision > rv.Rev() {
|
||||
return mvcc.ErrFutureRev
|
||||
}
|
||||
if greq.Revision < rv.FirstRev() {
|
||||
return mvcc.ErrCompacted
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func compareInt64(a, b int64) int {
|
||||
switch {
|
||||
case a < b:
|
||||
return -1
|
||||
case a > b:
|
||||
return 1
|
||||
default:
|
||||
return 0
|
||||
}
|
||||
}
|
||||
|
||||
// isGteRange determines if the range end is a >= range. This works around grpc
|
||||
// sending empty byte strings as nil; >= is encoded in the range end as '\0'.
|
||||
func isGteRange(rangeEnd []byte) bool {
|
||||
return len(rangeEnd) == 1 && rangeEnd[0] == 0
|
||||
}
|
||||
|
||||
func noSideEffect(r *pb.InternalRaftRequest) bool {
|
||||
return r.Range != nil || r.AuthUserGet != nil || r.AuthRoleGet != nil
|
||||
}
|
||||
|
||||
func removeNeedlessRangeReqs(txn *pb.TxnRequest) {
|
||||
f := func(ops []*pb.RequestOp) []*pb.RequestOp {
|
||||
j := 0
|
||||
for i := 0; i < len(ops); i++ {
|
||||
if _, ok := ops[i].Request.(*pb.RequestOp_RequestRange); ok {
|
||||
continue
|
||||
}
|
||||
ops[j] = ops[i]
|
||||
j++
|
||||
}
|
||||
|
||||
return ops[:j]
|
||||
}
|
||||
|
||||
txn.Success = f(txn.Success)
|
||||
txn.Failure = f(txn.Failure)
|
||||
}
|
||||
|
||||
func pruneKVs(rr *mvcc.RangeResult, isPrunable func(*mvccpb.KeyValue) bool) {
|
||||
j := 0
|
||||
for i := range rr.KVs {
|
||||
rr.KVs[j] = rr.KVs[i]
|
||||
if !isPrunable(&rr.KVs[i]) {
|
||||
j++
|
||||
}
|
||||
}
|
||||
rr.KVs = rr.KVs[:j]
|
||||
}
|
||||
|
||||
func newHeader(s *EtcdServer) *pb.ResponseHeader {
|
||||
return &pb.ResponseHeader{
|
||||
ClusterId: uint64(s.Cluster().ID()),
|
||||
MemberId: uint64(s.ID()),
|
||||
Revision: s.KV().Rev(),
|
||||
RaftTerm: s.Term(),
|
||||
}
|
||||
}
|
196
vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
generated
vendored
Normal file
196
vendor/github.com/coreos/etcd/etcdserver/apply_auth.go
generated
vendored
Normal file
|
@ -0,0 +1,196 @@
|
|||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/coreos/etcd/auth"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
)
|
||||
|
||||
type authApplierV3 struct {
|
||||
applierV3
|
||||
as auth.AuthStore
|
||||
|
||||
// mu serializes Apply so that user isn't corrupted and so that
|
||||
// serialized requests don't leak data from TOCTOU errors
|
||||
mu sync.Mutex
|
||||
|
||||
authInfo auth.AuthInfo
|
||||
}
|
||||
|
||||
func newAuthApplierV3(as auth.AuthStore, base applierV3) *authApplierV3 {
|
||||
return &authApplierV3{applierV3: base, as: as}
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Apply(r *pb.InternalRaftRequest) *applyResult {
|
||||
aa.mu.Lock()
|
||||
defer aa.mu.Unlock()
|
||||
if r.Header != nil {
|
||||
// backward-compatible with pre-3.0 releases when internalRaftRequest
|
||||
// does not have header field
|
||||
aa.authInfo.Username = r.Header.Username
|
||||
aa.authInfo.Revision = r.Header.AuthRevision
|
||||
}
|
||||
if needAdminPermission(r) {
|
||||
if err := aa.as.IsAdminPermitted(&aa.authInfo); err != nil {
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return &applyResult{err: err}
|
||||
}
|
||||
}
|
||||
ret := aa.applierV3.Apply(r)
|
||||
aa.authInfo.Username = ""
|
||||
aa.authInfo.Revision = 0
|
||||
return ret
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Put(txn mvcc.TxnWrite, r *pb.PutRequest) (*pb.PutResponse, error) {
|
||||
if err := aa.as.IsPutPermitted(&aa.authInfo, r.Key); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
return aa.applierV3.Put(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Range(txn mvcc.TxnRead, r *pb.RangeRequest) (*pb.RangeResponse, error) {
|
||||
if err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Range(txn, r)
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) DeleteRange(txn mvcc.TxnWrite, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
|
||||
if err := aa.as.IsDeleteRangePermitted(&aa.authInfo, r.Key, r.RangeEnd); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if r.PrevKv {
|
||||
err := aa.as.IsRangePermitted(&aa.authInfo, r.Key, r.RangeEnd)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
return aa.applierV3.DeleteRange(txn, r)
|
||||
}
|
||||
|
||||
func checkTxnReqsPermission(as auth.AuthStore, ai *auth.AuthInfo, reqs []*pb.RequestOp) error {
|
||||
for _, requ := range reqs {
|
||||
switch tv := requ.Request.(type) {
|
||||
case *pb.RequestOp_RequestRange:
|
||||
if tv.RequestRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsRangePermitted(ai, tv.RequestRange.Key, tv.RequestRange.RangeEnd); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestPut:
|
||||
if tv.RequestPut == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if err := as.IsPutPermitted(ai, tv.RequestPut.Key); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case *pb.RequestOp_RequestDeleteRange:
|
||||
if tv.RequestDeleteRange == nil {
|
||||
continue
|
||||
}
|
||||
|
||||
if tv.RequestDeleteRange.PrevKv {
|
||||
err := as.IsRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
err := as.IsDeleteRangePermitted(ai, tv.RequestDeleteRange.Key, tv.RequestDeleteRange.RangeEnd)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func checkTxnAuth(as auth.AuthStore, ai *auth.AuthInfo, rt *pb.TxnRequest) error {
|
||||
for _, c := range rt.Compare {
|
||||
if err := as.IsRangePermitted(ai, c.Key, nil); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Success); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := checkTxnReqsPermission(as, ai, rt.Failure); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (aa *authApplierV3) Txn(rt *pb.TxnRequest) (*pb.TxnResponse, error) {
|
||||
if err := checkTxnAuth(aa.as, &aa.authInfo, rt); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return aa.applierV3.Txn(rt)
|
||||
}
|
||||
|
||||
func needAdminPermission(r *pb.InternalRaftRequest) bool {
|
||||
switch {
|
||||
case r.AuthEnable != nil:
|
||||
return true
|
||||
case r.AuthDisable != nil:
|
||||
return true
|
||||
case r.AuthUserAdd != nil:
|
||||
return true
|
||||
case r.AuthUserDelete != nil:
|
||||
return true
|
||||
case r.AuthUserChangePassword != nil:
|
||||
return true
|
||||
case r.AuthUserGrantRole != nil:
|
||||
return true
|
||||
case r.AuthUserGet != nil:
|
||||
return true
|
||||
case r.AuthUserRevokeRole != nil:
|
||||
return true
|
||||
case r.AuthRoleAdd != nil:
|
||||
return true
|
||||
case r.AuthRoleGrantPermission != nil:
|
||||
return true
|
||||
case r.AuthRoleGet != nil:
|
||||
return true
|
||||
case r.AuthRoleRevokePermission != nil:
|
||||
return true
|
||||
case r.AuthRoleDelete != nil:
|
||||
return true
|
||||
case r.AuthUserList != nil:
|
||||
return true
|
||||
case r.AuthRoleList != nil:
|
||||
return true
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
140
vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
generated
vendored
Normal file
140
vendor/github.com/coreos/etcd/etcdserver/apply_v2.go
generated
vendored
Normal file
|
@ -0,0 +1,140 @@
|
|||
// Copyright 2016 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"path"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/api"
|
||||
pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/pbutil"
|
||||
"github.com/coreos/etcd/store"
|
||||
"github.com/coreos/go-semver/semver"
|
||||
)
|
||||
|
||||
// ApplierV2 is the interface for processing V2 raft messages
|
||||
type ApplierV2 interface {
|
||||
Delete(r *pb.Request) Response
|
||||
Post(r *pb.Request) Response
|
||||
Put(r *pb.Request) Response
|
||||
QGet(r *pb.Request) Response
|
||||
Sync(r *pb.Request) Response
|
||||
}
|
||||
|
||||
func NewApplierV2(s store.Store, c *membership.RaftCluster) ApplierV2 {
|
||||
return &applierV2store{store: s, cluster: c}
|
||||
}
|
||||
|
||||
type applierV2store struct {
|
||||
store store.Store
|
||||
cluster *membership.RaftCluster
|
||||
}
|
||||
|
||||
func (a *applierV2store) Delete(r *pb.Request) Response {
|
||||
switch {
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndDelete(r.Path, r.PrevValue, r.PrevIndex))
|
||||
default:
|
||||
return toResponse(a.store.Delete(r.Path, r.Dir, r.Recursive))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) Post(r *pb.Request) Response {
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, true, toTTLOptions(r)))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Put(r *pb.Request) Response {
|
||||
ttlOptions := toTTLOptions(r)
|
||||
exists, existsSet := pbutil.GetBool(r.PrevExist)
|
||||
switch {
|
||||
case existsSet:
|
||||
if exists {
|
||||
if r.PrevIndex == 0 && r.PrevValue == "" {
|
||||
return toResponse(a.store.Update(r.Path, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
}
|
||||
return toResponse(a.store.Create(r.Path, r.Dir, r.Val, false, ttlOptions))
|
||||
case r.PrevIndex > 0 || r.PrevValue != "":
|
||||
return toResponse(a.store.CompareAndSwap(r.Path, r.PrevValue, r.PrevIndex, r.Val, ttlOptions))
|
||||
default:
|
||||
if storeMemberAttributeRegexp.MatchString(r.Path) {
|
||||
id := membership.MustParseMemberIDFromKey(path.Dir(r.Path))
|
||||
var attr membership.Attributes
|
||||
if err := json.Unmarshal([]byte(r.Val), &attr); err != nil {
|
||||
plog.Panicf("unmarshal %s should never fail: %v", r.Val, err)
|
||||
}
|
||||
if a.cluster != nil {
|
||||
a.cluster.UpdateAttributes(id, attr)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
if r.Path == membership.StoreClusterVersionKey() {
|
||||
if a.cluster != nil {
|
||||
a.cluster.SetVersion(semver.Must(semver.NewVersion(r.Val)), api.UpdateCapability)
|
||||
}
|
||||
// return an empty response since there is no consumer.
|
||||
return Response{}
|
||||
}
|
||||
return toResponse(a.store.Set(r.Path, r.Dir, r.Val, ttlOptions))
|
||||
}
|
||||
}
|
||||
|
||||
func (a *applierV2store) QGet(r *pb.Request) Response {
|
||||
return toResponse(a.store.Get(r.Path, r.Recursive, r.Sorted))
|
||||
}
|
||||
|
||||
func (a *applierV2store) Sync(r *pb.Request) Response {
|
||||
a.store.DeleteExpiredKeys(time.Unix(0, r.Time))
|
||||
return Response{}
|
||||
}
|
||||
|
||||
// applyV2Request interprets r as a call to store.X and returns a Response interpreted
|
||||
// from store.Event
|
||||
func (s *EtcdServer) applyV2Request(r *pb.Request) Response {
|
||||
toTTLOptions(r)
|
||||
switch r.Method {
|
||||
case "POST":
|
||||
return s.applyV2.Post(r)
|
||||
case "PUT":
|
||||
return s.applyV2.Put(r)
|
||||
case "DELETE":
|
||||
return s.applyV2.Delete(r)
|
||||
case "QGET":
|
||||
return s.applyV2.QGet(r)
|
||||
case "SYNC":
|
||||
return s.applyV2.Sync(r)
|
||||
default:
|
||||
// This should never be reached, but just in case:
|
||||
return Response{err: ErrUnknownMethod}
|
||||
}
|
||||
}
|
||||
|
||||
func toTTLOptions(r *pb.Request) store.TTLOptionSet {
|
||||
refresh, _ := pbutil.GetBool(r.Refresh)
|
||||
ttlOptions := store.TTLOptionSet{Refresh: refresh}
|
||||
if r.Expiration != 0 {
|
||||
ttlOptions.ExpireTime = time.Unix(0, r.Expiration)
|
||||
}
|
||||
return ttlOptions
|
||||
}
|
||||
|
||||
func toResponse(ev *store.Event, err error) Response {
|
||||
return Response{Event: ev, err: err}
|
||||
}
|
81
vendor/github.com/coreos/etcd/etcdserver/backend.go
generated
vendored
Normal file
81
vendor/github.com/coreos/etcd/etcdserver/backend.go
generated
vendored
Normal file
|
@ -0,0 +1,81 @@
|
|||
// Copyright 2017 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/lease"
|
||||
"github.com/coreos/etcd/mvcc"
|
||||
"github.com/coreos/etcd/mvcc/backend"
|
||||
"github.com/coreos/etcd/raft/raftpb"
|
||||
"github.com/coreos/etcd/snap"
|
||||
)
|
||||
|
||||
func newBackend(cfg *ServerConfig) backend.Backend {
|
||||
bcfg := backend.DefaultBackendConfig()
|
||||
bcfg.Path = cfg.backendPath()
|
||||
if cfg.QuotaBackendBytes > 0 && cfg.QuotaBackendBytes != DefaultQuotaBytes {
|
||||
// permit 10% excess over quota for disarm
|
||||
bcfg.MmapSize = uint64(cfg.QuotaBackendBytes + cfg.QuotaBackendBytes/10)
|
||||
}
|
||||
return backend.New(bcfg)
|
||||
}
|
||||
|
||||
// openSnapshotBackend renames a snapshot db to the current etcd db and opens it.
|
||||
func openSnapshotBackend(cfg *ServerConfig, ss *snap.Snapshotter, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
snapPath, err := ss.DBFilePath(snapshot.Metadata.Index)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("database snapshot file path error: %v", err)
|
||||
}
|
||||
if err := os.Rename(snapPath, cfg.backendPath()); err != nil {
|
||||
return nil, fmt.Errorf("rename snapshot file error: %v", err)
|
||||
}
|
||||
return openBackend(cfg), nil
|
||||
}
|
||||
|
||||
// openBackend returns a backend using the current etcd db.
|
||||
func openBackend(cfg *ServerConfig) backend.Backend {
|
||||
fn := cfg.backendPath()
|
||||
beOpened := make(chan backend.Backend)
|
||||
go func() {
|
||||
beOpened <- newBackend(cfg)
|
||||
}()
|
||||
select {
|
||||
case be := <-beOpened:
|
||||
return be
|
||||
case <-time.After(time.Second):
|
||||
plog.Warningf("another etcd process is using %q and holds the file lock.", fn)
|
||||
plog.Warningf("waiting for it to exit before starting...")
|
||||
}
|
||||
return <-beOpened
|
||||
}
|
||||
|
||||
// recoverBackendSnapshot recovers the DB from a snapshot in case etcd crashes
|
||||
// before updating the backend db after persisting raft snapshot to disk,
|
||||
// violating the invariant snapshot.Metadata.Index < db.consistentIndex. In this
|
||||
// case, replace the db with the snapshot db sent by the leader.
|
||||
func recoverSnapshotBackend(cfg *ServerConfig, oldbe backend.Backend, snapshot raftpb.Snapshot) (backend.Backend, error) {
|
||||
var cIndex consistentIndex
|
||||
kv := mvcc.New(oldbe, &lease.FakeLessor{}, &cIndex)
|
||||
defer kv.Close()
|
||||
if snapshot.Metadata.Index <= kv.ConsistentIndex() {
|
||||
return oldbe, nil
|
||||
}
|
||||
oldbe.Close()
|
||||
return openSnapshotBackend(cfg, snap.New(cfg.SnapDir()), snapshot)
|
||||
}
|
258
vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
generated
vendored
Normal file
258
vendor/github.com/coreos/etcd/etcdserver/cluster_util.go
generated
vendored
Normal file
|
@ -0,0 +1,258 @@
|
|||
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "encoding/json"
    "fmt"
    "io/ioutil"
    "net/http"
    "sort"
    "time"

    "github.com/coreos/etcd/etcdserver/membership"
    "github.com/coreos/etcd/pkg/types"
    "github.com/coreos/etcd/version"
    "github.com/coreos/go-semver/semver"
)

// isMemberBootstrapped tries to check if the given member has been bootstrapped
// in the given cluster.
func isMemberBootstrapped(cl *membership.RaftCluster, member string, rt http.RoundTripper, timeout time.Duration) bool {
    rcl, err := getClusterFromRemotePeers(getRemotePeerURLs(cl, member), timeout, false, rt)
    if err != nil {
        return false
    }
    id := cl.MemberByName(member).ID
    m := rcl.Member(id)
    if m == nil {
        return false
    }
    if len(m.ClientURLs) > 0 {
        return true
    }
    return false
}

// GetClusterFromRemotePeers takes a set of URLs representing etcd peers, and
// attempts to construct a Cluster by accessing the members endpoint on one of
// these URLs. The first URL to provide a response is used. If no URLs provide
// a response, or a Cluster cannot be successfully created from a received
// response, an error is returned.
// Each request has a 10-second timeout. Because the upper limit of TTL is 5s,
// 10 seconds is enough for building the connection and finishing the request.
func GetClusterFromRemotePeers(urls []string, rt http.RoundTripper) (*membership.RaftCluster, error) {
    return getClusterFromRemotePeers(urls, 10*time.Second, true, rt)
}

// If logerr is true, it prints out more error messages.
func getClusterFromRemotePeers(urls []string, timeout time.Duration, logerr bool, rt http.RoundTripper) (*membership.RaftCluster, error) {
    cc := &http.Client{
        Transport: rt,
        Timeout:   timeout,
    }
    for _, u := range urls {
        resp, err := cc.Get(u + "/members")
        if err != nil {
            if logerr {
                plog.Warningf("could not get cluster response from %s: %v", u, err)
            }
            continue
        }
        b, err := ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        if err != nil {
            if logerr {
                plog.Warningf("could not read the body of cluster response: %v", err)
            }
            continue
        }
        var membs []*membership.Member
        if err = json.Unmarshal(b, &membs); err != nil {
            if logerr {
                plog.Warningf("could not unmarshal cluster response: %v", err)
            }
            continue
        }
        id, err := types.IDFromString(resp.Header.Get("X-Etcd-Cluster-ID"))
        if err != nil {
            if logerr {
                plog.Warningf("could not parse the cluster ID from cluster res: %v", err)
            }
            continue
        }

        // If the response contains members, build and return the raft cluster.
        // Otherwise the raft cluster formed would be an invalid empty cluster,
        // so return an error instead.
        if len(membs) > 0 {
            return membership.NewClusterFromMembers("", id, membs), nil
        }

        return nil, fmt.Errorf("failed to get raft cluster member(s) from the given urls")
    }
    return nil, fmt.Errorf("could not retrieve cluster information from the given urls")
}

// getRemotePeerURLs returns peer urls of remote members in the cluster. The
// returned list is sorted in ascending lexicographical order.
func getRemotePeerURLs(cl *membership.RaftCluster, local string) []string {
    us := make([]string, 0)
    for _, m := range cl.Members() {
        if m.Name == local {
            continue
        }
        us = append(us, m.PeerURLs...)
    }
    sort.Strings(us)
    return us
}

// getVersions returns the versions of the members in the given cluster.
// The key of the returned map is the member's ID. The value of the returned
// map is the semver version string, including server and cluster.
// If it fails to get the version of a member, the value will be nil.
func getVersions(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) map[string]*version.Versions {
    members := cl.Members()
    vers := make(map[string]*version.Versions)
    for _, m := range members {
        if m.ID == local {
            cv := "not_decided"
            if cl.Version() != nil {
                cv = cl.Version().String()
            }
            vers[m.ID.String()] = &version.Versions{Server: version.Version, Cluster: cv}
            continue
        }
        ver, err := getVersion(m, rt)
        if err != nil {
            plog.Warningf("cannot get the version of member %s (%v)", m.ID, err)
            vers[m.ID.String()] = nil
        } else {
            vers[m.ID.String()] = ver
        }
    }
    return vers
}

// decideClusterVersion decides the cluster version based on the versions map.
// The returned version is the min server version in the map, or nil if the min
// version is unknown.
func decideClusterVersion(vers map[string]*version.Versions) *semver.Version {
    var cv *semver.Version
    lv := semver.Must(semver.NewVersion(version.Version))

    for mid, ver := range vers {
        if ver == nil {
            return nil
        }
        v, err := semver.NewVersion(ver.Server)
        if err != nil {
            plog.Errorf("cannot understand the version of member %s (%v)", mid, err)
            return nil
        }
        if lv.LessThan(*v) {
            plog.Warningf("the local etcd version %s is not up-to-date", lv.String())
            plog.Warningf("member %s has a higher version %s", mid, ver.Server)
        }
        if cv == nil {
            cv = v
        } else if v.LessThan(*cv) {
            cv = v
        }
    }
    return cv
}

// isCompatibleWithCluster returns true if the local member has a compatible
// version with the current running cluster.
// The version is considered compatible when at least one of the other members
// in the cluster has a cluster version in the range of [MinClusterVersion,
// Version] and no known member has a cluster version out of the range.
// We set this rule since, when the local member joins, another member might be
// offline.
func isCompatibleWithCluster(cl *membership.RaftCluster, local types.ID, rt http.RoundTripper) bool {
    vers := getVersions(cl, local, rt)
    minV := semver.Must(semver.NewVersion(version.MinClusterVersion))
    maxV := semver.Must(semver.NewVersion(version.Version))
    maxV = &semver.Version{
        Major: maxV.Major,
        Minor: maxV.Minor,
    }

    return isCompatibleWithVers(vers, local, minV, maxV)
}

func isCompatibleWithVers(vers map[string]*version.Versions, local types.ID, minV, maxV *semver.Version) bool {
    var ok bool
    for id, v := range vers {
        // ignore comparison with local version
        if id == local.String() {
            continue
        }
        if v == nil {
            continue
        }
        clusterv, err := semver.NewVersion(v.Cluster)
        if err != nil {
            plog.Errorf("cannot understand the cluster version of member %s (%v)", id, err)
            continue
        }
        if clusterv.LessThan(*minV) {
            plog.Warningf("the running cluster version(%v) is lower than the minimal cluster version(%v) supported", clusterv.String(), minV.String())
            return false
        }
        if maxV.LessThan(*clusterv) {
            plog.Warningf("the running cluster version(%v) is higher than the maximum cluster version(%v) supported", clusterv.String(), maxV.String())
            return false
        }
        ok = true
    }
    return ok
}

// getVersion returns the Versions of the given member via its
// peerURLs. Returns the last error if it fails to get the version.
func getVersion(m *membership.Member, rt http.RoundTripper) (*version.Versions, error) {
    cc := &http.Client{
        Transport: rt,
    }
    var (
        err  error
        resp *http.Response
    )

    for _, u := range m.PeerURLs {
        resp, err = cc.Get(u + "/version")
        if err != nil {
            plog.Warningf("failed to reach the peerURL(%s) of member %s (%v)", u, m.ID, err)
            continue
        }
        var b []byte
        b, err = ioutil.ReadAll(resp.Body)
        resp.Body.Close()
        if err != nil {
            plog.Warningf("failed to read out the response body from the peerURL(%s) of member %s (%v)", u, m.ID, err)
            continue
        }
        var vers version.Versions
        if err = json.Unmarshal(b, &vers); err != nil {
            plog.Warningf("failed to unmarshal the response body got from the peerURL(%s) of member %s (%v)", u, m.ID, err)
            continue
        }
        return &vers, nil
    }
    return nil, err
}
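decideClusterVersion above reduces the version map to the minimum server version. A standalone sketch of that min-selection using the same go-semver library; the three version strings are made-up inputs:

package main

import (
    "fmt"

    "github.com/coreos/go-semver/semver"
)

func main() {
    // Hypothetical server versions reported by three members.
    reported := []string{"3.2.0", "3.1.5", "3.2.1"}

    var min *semver.Version
    for _, s := range reported {
        v := semver.Must(semver.NewVersion(s))
        if min == nil || v.LessThan(*min) {
            min = v
        }
    }
    // The cluster version is the minimum server version: 3.1.5 here.
    fmt.Println("decided cluster version:", min)
}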
204
vendor/github.com/coreos/etcd/etcdserver/config.go
generated
vendored
Normal file
@@ -0,0 +1,204 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "fmt"
    "path/filepath"
    "sort"
    "strings"
    "time"

    "golang.org/x/net/context"

    "github.com/coreos/etcd/pkg/netutil"
    "github.com/coreos/etcd/pkg/transport"
    "github.com/coreos/etcd/pkg/types"
)

// ServerConfig holds the configuration of etcd as taken from the command line or discovery.
type ServerConfig struct {
    Name           string
    DiscoveryURL   string
    DiscoveryProxy string
    ClientURLs     types.URLs
    PeerURLs       types.URLs
    DataDir        string
    // DedicatedWALDir config will make etcd write the WAL to the WALDir
    // rather than the dataDir/member/wal.
    DedicatedWALDir     string
    SnapCount           uint64
    MaxSnapFiles        uint
    MaxWALFiles         uint
    InitialPeerURLsMap  types.URLsMap
    InitialClusterToken string
    NewCluster          bool
    ForceNewCluster     bool
    PeerTLSInfo         transport.TLSInfo

    TickMs           uint
    ElectionTicks    int
    BootstrapTimeout time.Duration

    AutoCompactionRetention int
    QuotaBackendBytes       int64

    StrictReconfigCheck bool

    // ClientCertAuthEnabled is true when cert has been signed by the client CA.
    ClientCertAuthEnabled bool

    AuthToken string
}

// VerifyBootstrap sanity-checks the initial config for the bootstrap case
// and returns an error for things that should never happen.
func (c *ServerConfig) VerifyBootstrap() error {
    if err := c.hasLocalMember(); err != nil {
        return err
    }
    if err := c.advertiseMatchesCluster(); err != nil {
        return err
    }
    if checkDuplicateURL(c.InitialPeerURLsMap) {
        return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
    }
    if c.InitialPeerURLsMap.String() == "" && c.DiscoveryURL == "" {
        return fmt.Errorf("initial cluster unset and no discovery URL found")
    }
    return nil
}

// VerifyJoinExisting sanity-checks the initial config for the join-existing-cluster
// case and returns an error for things that should never happen.
func (c *ServerConfig) VerifyJoinExisting() error {
    // The member has announced its peer urls to the cluster before starting; no need to
    // set the configuration again.
    if err := c.hasLocalMember(); err != nil {
        return err
    }
    if checkDuplicateURL(c.InitialPeerURLsMap) {
        return fmt.Errorf("initial cluster %s has duplicate url", c.InitialPeerURLsMap)
    }
    if c.DiscoveryURL != "" {
        return fmt.Errorf("discovery URL should not be set when joining existing initial cluster")
    }
    return nil
}

// hasLocalMember checks that the cluster at least contains the local server.
func (c *ServerConfig) hasLocalMember() error {
    if urls := c.InitialPeerURLsMap[c.Name]; urls == nil {
        return fmt.Errorf("couldn't find local name %q in the initial cluster configuration", c.Name)
    }
    return nil
}

// advertiseMatchesCluster confirms peer URLs match those in the cluster peer list.
func (c *ServerConfig) advertiseMatchesCluster() error {
    urls, apurls := c.InitialPeerURLsMap[c.Name], c.PeerURLs.StringSlice()
    urls.Sort()
    sort.Strings(apurls)
    ctx, cancel := context.WithTimeout(context.TODO(), 30*time.Second)
    defer cancel()
    if !netutil.URLStringsEqual(ctx, apurls, urls.StringSlice()) {
        umap := map[string]types.URLs{c.Name: c.PeerURLs}
        return fmt.Errorf("--initial-cluster must include %s given --initial-advertise-peer-urls=%s", types.URLsMap(umap).String(), strings.Join(apurls, ","))
    }
    return nil
}

func (c *ServerConfig) MemberDir() string { return filepath.Join(c.DataDir, "member") }

func (c *ServerConfig) WALDir() string {
    if c.DedicatedWALDir != "" {
        return c.DedicatedWALDir
    }
    return filepath.Join(c.MemberDir(), "wal")
}

func (c *ServerConfig) SnapDir() string { return filepath.Join(c.MemberDir(), "snap") }

func (c *ServerConfig) ShouldDiscover() bool { return c.DiscoveryURL != "" }

// ReqTimeout returns timeout for request to finish.
func (c *ServerConfig) ReqTimeout() time.Duration {
    // 5s for queue waiting, computation and disk IO delay
    // + 2 * election timeout for possible leader election
    return 5*time.Second + 2*time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond
}

func (c *ServerConfig) electionTimeout() time.Duration {
    return time.Duration(c.ElectionTicks) * time.Duration(c.TickMs) * time.Millisecond
}

func (c *ServerConfig) peerDialTimeout() time.Duration {
    // 1s for queue wait and system delay
    // + one RTT, which is smaller than 1/5 election timeout
    return time.Second + time.Duration(c.ElectionTicks)*time.Duration(c.TickMs)*time.Millisecond/5
}

func (c *ServerConfig) PrintWithInitial() { c.print(true) }

func (c *ServerConfig) Print() { c.print(false) }

func (c *ServerConfig) print(initial bool) {
    plog.Infof("name = %s", c.Name)
    if c.ForceNewCluster {
        plog.Infof("force new cluster")
    }
    plog.Infof("data dir = %s", c.DataDir)
    plog.Infof("member dir = %s", c.MemberDir())
    if c.DedicatedWALDir != "" {
        plog.Infof("dedicated WAL dir = %s", c.DedicatedWALDir)
    }
    plog.Infof("heartbeat = %dms", c.TickMs)
    plog.Infof("election = %dms", c.ElectionTicks*int(c.TickMs))
    plog.Infof("snapshot count = %d", c.SnapCount)
    if len(c.DiscoveryURL) != 0 {
        plog.Infof("discovery URL = %s", c.DiscoveryURL)
        if len(c.DiscoveryProxy) != 0 {
            plog.Infof("discovery proxy = %s", c.DiscoveryProxy)
        }
    }
    plog.Infof("advertise client URLs = %s", c.ClientURLs)
    if initial {
        plog.Infof("initial advertise peer URLs = %s", c.PeerURLs)
        plog.Infof("initial cluster = %s", c.InitialPeerURLsMap)
    }
}

func checkDuplicateURL(urlsmap types.URLsMap) bool {
    um := make(map[string]bool)
    for _, urls := range urlsmap {
        for _, url := range urls {
            u := url.String()
            if um[u] {
                return true
            }
            um[u] = true
        }
    }
    return false
}

func (c *ServerConfig) bootstrapTimeout() time.Duration {
    if c.BootstrapTimeout != 0 {
        return c.BootstrapTimeout
    }
    return time.Second
}

func (c *ServerConfig) backendPath() string { return filepath.Join(c.SnapDir(), "db") }
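ReqTimeout and electionTimeout are simple arithmetic over TickMs and ElectionTicks. A quick worked example, assuming etcd's common defaults of a 100ms tick and 10 election ticks:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Assumed defaults: 100ms heartbeat tick, 10 ticks per election.
    tickMs := uint(100)
    electionTicks := 10

    // Same formulas as electionTimeout and ReqTimeout above.
    election := time.Duration(electionTicks) * time.Duration(tickMs) * time.Millisecond
    reqTimeout := 5*time.Second + 2*election

    fmt.Println(election)   // 1s
    fmt.Println(reqTimeout) // 7s
}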
33
vendor/github.com/coreos/etcd/etcdserver/consistent_index.go
generated
vendored
Normal file
@@ -0,0 +1,33 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "sync/atomic"
)

// consistentIndex represents the offset of an entry in a consistent replica log.
// It implements the mvcc.ConsistentIndexGetter interface.
// It is always set to the offset of current entry before executing the entry,
// so ConsistentWatchableKV could get the consistent index from it.
type consistentIndex uint64

func (i *consistentIndex) setConsistentIndex(v uint64) {
    atomic.StoreUint64((*uint64)(i), v)
}

func (i *consistentIndex) ConsistentIndex() uint64 {
    return atomic.LoadUint64((*uint64)(i))
}
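The type above is the usual Go idiom for an atomically accessed counter: a named uint64 whose methods go through sync/atomic, so mixed readers and writers never race. A self-contained sketch of the same idiom under a hypothetical name:

package main

import (
    "fmt"
    "sync"
    "sync/atomic"
)

// counter mirrors the consistentIndex pattern: a named uint64 type
// accessed only through atomic loads and stores.
type counter uint64

func (c *counter) set(v uint64) { atomic.StoreUint64((*uint64)(c), v) }
func (c *counter) get() uint64  { return atomic.LoadUint64((*uint64)(c)) }

func main() {
    var c counter
    var wg sync.WaitGroup
    for i := 1; i <= 8; i++ {
        wg.Add(1)
        go func(v uint64) {
            defer wg.Done()
            c.set(v) // concurrent writers are safe; readers see some fully written value
        }(uint64(i))
    }
    wg.Wait()
    fmt.Println("last observed index:", c.get())
}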
16
vendor/github.com/coreos/etcd/etcdserver/doc.go
generated
vendored
Normal file
@@ -0,0 +1,16 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package etcdserver defines how etcd servers interact and store their states.
package etcdserver
46
vendor/github.com/coreos/etcd/etcdserver/errors.go
generated
vendored
Normal file
@@ -0,0 +1,46 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "errors"
    "fmt"
)

var (
    ErrUnknownMethod              = errors.New("etcdserver: unknown method")
    ErrStopped                    = errors.New("etcdserver: server stopped")
    ErrCanceled                   = errors.New("etcdserver: request cancelled")
    ErrTimeout                    = errors.New("etcdserver: request timed out")
    ErrTimeoutDueToLeaderFail     = errors.New("etcdserver: request timed out, possibly due to previous leader failure")
    ErrTimeoutDueToConnectionLost = errors.New("etcdserver: request timed out, possibly due to connection lost")
    ErrTimeoutLeaderTransfer      = errors.New("etcdserver: request timed out, leader transfer took too long")
    ErrNotEnoughStartedMembers    = errors.New("etcdserver: re-configuration failed due to not enough started members")
    ErrNoLeader                   = errors.New("etcdserver: no leader")
    ErrRequestTooLarge            = errors.New("etcdserver: request is too large")
    ErrNoSpace                    = errors.New("etcdserver: no space")
    ErrTooManyRequests            = errors.New("etcdserver: too many requests")
    ErrUnhealthy                  = errors.New("etcdserver: unhealthy cluster")
    ErrKeyNotFound                = errors.New("etcdserver: key not found")
)

type DiscoveryError struct {
    Op  string
    Err error
}

func (e DiscoveryError) Error() string {
    return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
}
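For illustration, DiscoveryError composes an operation name and a wrapped error into a single message. A minimal standalone reproduction (the Op value is made up):

package main

import (
    "errors"
    "fmt"
)

// Same shape as the vendored DiscoveryError.
type DiscoveryError struct {
    Op  string
    Err error
}

func (e DiscoveryError) Error() string {
    return fmt.Sprintf("failed to %s discovery cluster (%v)", e.Op, e.Err)
}

func main() {
    err := DiscoveryError{Op: "join", Err: errors.New("connection refused")}
    // Prints: failed to join discovery cluster (connection refused)
    fmt.Println(err)
}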
102
vendor/github.com/coreos/etcd/etcdserver/metrics.go
generated
vendored
Normal file
@@ -0,0 +1,102 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "time"

    "github.com/coreos/etcd/pkg/runtime"
    "github.com/prometheus/client_golang/prometheus"
)

var (
    hasLeader = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "has_leader",
        Help:      "Whether or not a leader exists. 1 is existence, 0 is not.",
    })
    leaderChanges = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "leader_changes_seen_total",
        Help:      "The number of leader changes seen.",
    })
    proposalsCommitted = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "proposals_committed_total",
        Help:      "The total number of consensus proposals committed.",
    })
    proposalsApplied = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "proposals_applied_total",
        Help:      "The total number of consensus proposals applied.",
    })
    proposalsPending = prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "proposals_pending",
        Help:      "The current number of pending proposals to commit.",
    })
    proposalsFailed = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd",
        Subsystem: "server",
        Name:      "proposals_failed_total",
        Help:      "The total number of failed proposals seen.",
    })
    leaseExpired = prometheus.NewCounter(prometheus.CounterOpts{
        Namespace: "etcd_debugging",
        Subsystem: "server",
        Name:      "lease_expired_total",
        Help:      "The total number of expired leases.",
    })
)

func init() {
    prometheus.MustRegister(hasLeader)
    prometheus.MustRegister(leaderChanges)
    prometheus.MustRegister(proposalsCommitted)
    prometheus.MustRegister(proposalsApplied)
    prometheus.MustRegister(proposalsPending)
    prometheus.MustRegister(proposalsFailed)
    prometheus.MustRegister(leaseExpired)
}

func monitorFileDescriptor(done <-chan struct{}) {
    ticker := time.NewTicker(5 * time.Second)
    defer ticker.Stop()
    for {
        used, err := runtime.FDUsage()
        if err != nil {
            plog.Errorf("cannot monitor file descriptor usage (%v)", err)
            return
        }
        limit, err := runtime.FDLimit()
        if err != nil {
            plog.Errorf("cannot monitor file descriptor usage (%v)", err)
            return
        }
        if used >= limit/5*4 {
            plog.Warningf("80%% of the file descriptor limit is used [used = %d, limit = %d]", used, limit)
        }
        select {
        case <-ticker.C:
        case <-done:
            return
        }
    }
}
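The monitorFileDescriptor check uses integer math, so the 80% threshold is limit/5*4 rounded down. A tiny illustration with an assumed limit of 1024:

package main

import "fmt"

func main() {
    // monitorFileDescriptor warns at 4/5 of the limit, using integer
    // division: used >= limit/5*4. With an assumed limit of 1024 the
    // threshold is 1024/5*4 = 816.
    limit := uint64(1024)
    for _, used := range []uint64{800, 816, 900} {
        fmt.Printf("used=%d warn=%v\n", used, used >= limit/5*4)
    }
}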
121
vendor/github.com/coreos/etcd/etcdserver/quota.go
generated
vendored
Normal file
@@ -0,0 +1,121 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
)

const (
    // DefaultQuotaBytes is the number of bytes the backend Size may
    // consume before exceeding the space quota.
    DefaultQuotaBytes = int64(2 * 1024 * 1024 * 1024) // 2GB
    // MaxQuotaBytes is the maximum number of bytes suggested for a backend
    // quota. A larger quota may lead to degraded performance.
    MaxQuotaBytes = int64(8 * 1024 * 1024 * 1024) // 8GB
)

// Quota represents an arbitrary quota against arbitrary requests. Each request
// costs some charge; if there is not enough remaining charge, then there are
// too few resources available within the quota to apply the request.
type Quota interface {
    // Available judges whether the given request fits within the quota.
    Available(req interface{}) bool
    // Cost computes the charge against the quota for a given request.
    Cost(req interface{}) int
    // Remaining is the amount of charge left for the quota.
    Remaining() int64
}

type passthroughQuota struct{}

func (*passthroughQuota) Available(interface{}) bool { return true }
func (*passthroughQuota) Cost(interface{}) int       { return 0 }
func (*passthroughQuota) Remaining() int64           { return 1 }

type backendQuota struct {
    s               *EtcdServer
    maxBackendBytes int64
}

const (
    // leaseOverhead is an estimate for the cost of storing a lease
    leaseOverhead = 64
    // kvOverhead is an estimate for the cost of storing a key's metadata
    kvOverhead = 256
)

func NewBackendQuota(s *EtcdServer) Quota {
    if s.Cfg.QuotaBackendBytes < 0 {
        // disable quotas if negative
        plog.Warningf("disabling backend quota")
        return &passthroughQuota{}
    }
    if s.Cfg.QuotaBackendBytes == 0 {
        // use default size if no quota size given
        return &backendQuota{s, DefaultQuotaBytes}
    }
    if s.Cfg.QuotaBackendBytes > MaxQuotaBytes {
        plog.Warningf("backend quota %v exceeds maximum recommended quota %v", s.Cfg.QuotaBackendBytes, MaxQuotaBytes)
    }
    return &backendQuota{s, s.Cfg.QuotaBackendBytes}
}

func (b *backendQuota) Available(v interface{}) bool {
    // TODO: maybe optimize backend.Size()
    return b.s.Backend().Size()+int64(b.Cost(v)) < b.maxBackendBytes
}

func (b *backendQuota) Cost(v interface{}) int {
    switch r := v.(type) {
    case *pb.PutRequest:
        return costPut(r)
    case *pb.TxnRequest:
        return costTxn(r)
    case *pb.LeaseGrantRequest:
        return leaseOverhead
    default:
        panic("unexpected cost")
    }
}

func costPut(r *pb.PutRequest) int { return kvOverhead + len(r.Key) + len(r.Value) }

func costTxnReq(u *pb.RequestOp) int {
    r := u.GetRequestPut()
    if r == nil {
        return 0
    }
    return costPut(r)
}

func costTxn(r *pb.TxnRequest) int {
    sizeSuccess := 0
    for _, u := range r.Success {
        sizeSuccess += costTxnReq(u)
    }
    sizeFailure := 0
    for _, u := range r.Failure {
        sizeFailure += costTxnReq(u)
    }
    if sizeFailure > sizeSuccess {
        return sizeFailure
    }
    return sizeSuccess
}

func (b *backendQuota) Remaining() int64 {
    return b.maxBackendBytes - b.s.Backend().Size()
}
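costTxn charges a transaction for the more expensive of its two branches, since only one branch will execute but the quota must hold either way. A self-contained sketch of that accounting with plain structs instead of the protobuf types; the overhead constant mirrors the vendored kvOverhead estimate:

package main

import "fmt"

// Assumed per-key metadata estimate, as in the vendored code.
const kvOverhead = 256

type put struct{ key, value string }

// costPut charges fixed metadata overhead plus key and value sizes.
func costPut(p put) int { return kvOverhead + len(p.key) + len(p.value) }

// costTxn charges the more expensive of the success and failure branches.
func costTxn(success, failure []put) int {
    s, f := 0, 0
    for _, p := range success {
        s += costPut(p)
    }
    for _, p := range failure {
        f += costPut(p)
    }
    if f > s {
        return f
    }
    return s
}

func main() {
    success := []put{{"foo", "bar"}}            // 256+3+3 = 262
    failure := []put{{"foo", "fallback-value"}} // 256+3+14 = 273
    fmt.Println(costTxn(success, failure))      // 273: charged for the worst case
}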
594
vendor/github.com/coreos/etcd/etcdserver/raft.go
generated
vendored
Normal file
@@ -0,0 +1,594 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "encoding/json"
    "expvar"
    "sort"
    "sync"
    "sync/atomic"
    "time"

    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/etcdserver/membership"
    "github.com/coreos/etcd/pkg/contention"
    "github.com/coreos/etcd/pkg/pbutil"
    "github.com/coreos/etcd/pkg/types"
    "github.com/coreos/etcd/raft"
    "github.com/coreos/etcd/raft/raftpb"
    "github.com/coreos/etcd/rafthttp"
    "github.com/coreos/etcd/wal"
    "github.com/coreos/etcd/wal/walpb"
    "github.com/coreos/pkg/capnslog"
)

const (
    // Number of entries for a slow follower to catch up after compacting
    // the raft storage entries.
    // We expect the follower has a millisecond level latency with the leader.
    // The max throughput is around 10K. Keeping 5K entries is enough to help
    // a follower catch up.
    numberOfCatchUpEntries = 5000

    // The max throughput of etcd will not exceed 100MB/s (100K * 1KB value).
    // Assuming the RTT is around 10ms, 1MB max size is large enough.
    maxSizePerMsg = 1 * 1024 * 1024
    // Never overflow the rafthttp buffer, which is 4096.
    // TODO: a better const?
    maxInflightMsgs = 4096 / 8
)

var (
    // protects raftStatus
    raftStatusMu sync.Mutex
    // Indirection for the expvar func interface: expvar panics when
    // publishing a duplicate name and does not support removing a
    // registered name, so only register a func that calls raftStatus
    // and change raftStatus as needed.
    raftStatus func() raft.Status
)

func init() {
    raft.SetLogger(capnslog.NewPackageLogger("github.com/coreos/etcd", "raft"))
    expvar.Publish("raft.status", expvar.Func(func() interface{} {
        raftStatusMu.Lock()
        defer raftStatusMu.Unlock()
        return raftStatus()
    }))
}

type RaftTimer interface {
    Index() uint64
    Term() uint64
}

// apply contains entries, snapshot to be applied. Once
// an apply is consumed, the entries will be persisted to
// raft storage concurrently; the application must read
// raftDone before assuming the raft messages are stable.
type apply struct {
    entries  []raftpb.Entry
    snapshot raftpb.Snapshot
    // notifyc synchronizes etcd server applies with the raft node
    notifyc chan struct{}
}

type raftNode struct {
    // Cache of the latest raft index and raft term the server has seen.
    // These three uint64 fields must be the first elements to keep 64-bit
    // alignment for atomic access to the fields.
    index uint64
    term  uint64
    lead  uint64

    raftNodeConfig

    // a chan to send/receive snapshot
    msgSnapC chan raftpb.Message

    // a chan to send out apply
    applyc chan apply

    // a chan to send out readState
    readStateC chan raft.ReadState

    // utility
    ticker *time.Ticker
    // contention detectors for raft heartbeat message
    td *contention.TimeoutDetector

    stopped chan struct{}
    done    chan struct{}
}

type raftNodeConfig struct {
    // to check if msg receiver is removed from cluster
    isIDRemoved func(id uint64) bool
    raft.Node
    raftStorage *raft.MemoryStorage
    storage     Storage
    heartbeat   time.Duration // for logging
    // transport specifies the transport to send and receive msgs to members.
    // Sending messages MUST NOT block. It is okay to drop messages, since
    // clients should timeout and reissue their messages.
    // If transport is nil, server will panic.
    transport rafthttp.Transporter
}

func newRaftNode(cfg raftNodeConfig) *raftNode {
    r := &raftNode{
        raftNodeConfig: cfg,
        // set up contention detectors for raft heartbeat message.
        // expect to send a heartbeat within 2 heartbeat intervals.
        td:         contention.NewTimeoutDetector(2 * cfg.heartbeat),
        readStateC: make(chan raft.ReadState, 1),
        msgSnapC:   make(chan raftpb.Message, maxInFlightMsgSnap),
        applyc:     make(chan apply),
        stopped:    make(chan struct{}),
        done:       make(chan struct{}),
    }
    if r.heartbeat == 0 {
        r.ticker = &time.Ticker{}
    } else {
        r.ticker = time.NewTicker(r.heartbeat)
    }
    return r
}

// start prepares and starts raftNode in a new goroutine. It is no longer safe
// to modify the fields after it has been started.
func (r *raftNode) start(rh *raftReadyHandler) {
    internalTimeout := time.Second

    go func() {
        defer r.onStop()
        islead := false

        for {
            select {
            case <-r.ticker.C:
                r.Tick()
            case rd := <-r.Ready():
                if rd.SoftState != nil {
                    newLeader := rd.SoftState.Lead != raft.None && atomic.LoadUint64(&r.lead) != rd.SoftState.Lead
                    if newLeader {
                        leaderChanges.Inc()
                    }

                    if rd.SoftState.Lead == raft.None {
                        hasLeader.Set(0)
                    } else {
                        hasLeader.Set(1)
                    }

                    atomic.StoreUint64(&r.lead, rd.SoftState.Lead)
                    islead = rd.RaftState == raft.StateLeader
                    rh.updateLeadership(newLeader)
                    r.td.Reset()
                }

                if len(rd.ReadStates) != 0 {
                    select {
                    case r.readStateC <- rd.ReadStates[len(rd.ReadStates)-1]:
                    case <-time.After(internalTimeout):
                        plog.Warningf("timed out sending read state")
                    case <-r.stopped:
                        return
                    }
                }

                notifyc := make(chan struct{}, 1)
                ap := apply{
                    entries:  rd.CommittedEntries,
                    snapshot: rd.Snapshot,
                    notifyc:  notifyc,
                }

                updateCommittedIndex(&ap, rh)

                select {
                case r.applyc <- ap:
                case <-r.stopped:
                    return
                }

                // The leader can write to its disk in parallel with replicating
                // to the followers and them writing to their disks.
                // For more details, check raft thesis 10.2.1.
                if islead {
                    // gofail: var raftBeforeLeaderSend struct{}
                    r.transport.Send(r.processMessages(rd.Messages))
                }

                // gofail: var raftBeforeSave struct{}
                if err := r.storage.Save(rd.HardState, rd.Entries); err != nil {
                    plog.Fatalf("raft save state and entries error: %v", err)
                }
                if !raft.IsEmptyHardState(rd.HardState) {
                    proposalsCommitted.Set(float64(rd.HardState.Commit))
                }
                // gofail: var raftAfterSave struct{}

                if !raft.IsEmptySnap(rd.Snapshot) {
                    // gofail: var raftBeforeSaveSnap struct{}
                    if err := r.storage.SaveSnap(rd.Snapshot); err != nil {
                        plog.Fatalf("raft save snapshot error: %v", err)
                    }
                    // etcdserver now claims the snapshot has been persisted onto the disk
                    notifyc <- struct{}{}

                    // gofail: var raftAfterSaveSnap struct{}
                    r.raftStorage.ApplySnapshot(rd.Snapshot)
                    plog.Infof("raft applied incoming snapshot at index %d", rd.Snapshot.Metadata.Index)
                    // gofail: var raftAfterApplySnap struct{}
                }

                r.raftStorage.Append(rd.Entries)

                if !islead {
                    // finish processing incoming messages before we signal raftdone chan
                    msgs := r.processMessages(rd.Messages)

                    // now unblocks 'applyAll' that waits on Raft log disk writes before triggering snapshots
                    notifyc <- struct{}{}

                    // A candidate or follower needs to wait for all pending configuration
                    // changes to be applied before sending messages.
                    // Otherwise we might incorrectly count votes (e.g. votes from removed members).
                    // Also, a slow machine's follower raft layer could proceed to become the leader
                    // of its own single-node cluster before the apply layer applies the config change.
                    // We simply wait for ALL pending entries to be applied for now.
                    // We might improve this later on if it causes unnecessary long blocking issues.
                    waitApply := false
                    for _, ent := range rd.CommittedEntries {
                        if ent.Type == raftpb.EntryConfChange {
                            waitApply = true
                            break
                        }
                    }
                    if waitApply {
                        // blocks until 'applyAll' calls 'applyWait.Trigger'
                        // to be in sync with scheduled config-change job
                        // (assume notifyc has cap of 1)
                        select {
                        case notifyc <- struct{}{}:
                        case <-r.stopped:
                            return
                        }
                    }

                    // gofail: var raftBeforeFollowerSend struct{}
                    r.transport.Send(msgs)
                } else {
                    // leader already processed 'MsgSnap' and signaled
                    notifyc <- struct{}{}
                }

                r.Advance()
            case <-r.stopped:
                return
            }
        }
    }()
}

func updateCommittedIndex(ap *apply, rh *raftReadyHandler) {
    var ci uint64
    if len(ap.entries) != 0 {
        ci = ap.entries[len(ap.entries)-1].Index
    }
    if ap.snapshot.Metadata.Index > ci {
        ci = ap.snapshot.Metadata.Index
    }
    if ci != 0 {
        rh.updateCommittedIndex(ci)
    }
}

func (r *raftNode) processMessages(ms []raftpb.Message) []raftpb.Message {
    sentAppResp := false
    for i := len(ms) - 1; i >= 0; i-- {
        if r.isIDRemoved(ms[i].To) {
            ms[i].To = 0
        }

        if ms[i].Type == raftpb.MsgAppResp {
            if sentAppResp {
                ms[i].To = 0
            } else {
                sentAppResp = true
            }
        }

        if ms[i].Type == raftpb.MsgSnap {
            // There are two separate data stores: the store for v2, and the KV for v3.
            // The msgSnap only contains the most recent snapshot of store without KV.
            // So we need to redirect the msgSnap to the etcd server main loop for merging in the
            // current store snapshot and KV snapshot.
            select {
            case r.msgSnapC <- ms[i]:
            default:
                // drop msgSnap if the inflight chan is full.
            }
            ms[i].To = 0
        }
        if ms[i].Type == raftpb.MsgHeartbeat {
            ok, exceed := r.td.Observe(ms[i].To)
            if !ok {
                // TODO: limit request rate.
                plog.Warningf("failed to send out heartbeat on time (exceeded the %v timeout for %v)", r.heartbeat, exceed)
                plog.Warningf("server is likely overloaded")
            }
        }
    }
    return ms
}

func (r *raftNode) apply() chan apply {
    return r.applyc
}

func (r *raftNode) stop() {
    r.stopped <- struct{}{}
    <-r.done
}

func (r *raftNode) onStop() {
    r.Stop()
    r.ticker.Stop()
    r.transport.Stop()
    if err := r.storage.Close(); err != nil {
        plog.Panicf("raft close storage error: %v", err)
    }
    close(r.done)
}

// for testing
func (r *raftNode) pauseSending() {
    p := r.transport.(rafthttp.Pausable)
    p.Pause()
}

func (r *raftNode) resumeSending() {
    p := r.transport.(rafthttp.Pausable)
    p.Resume()
}

// advanceTicksForElection advances ticks to the node for fast election.
// This reduces the time to wait for the first leader election if bootstrapping the whole
// cluster, while leaving at least 1 heartbeat for a possible existing leader
// to contact it.
func advanceTicksForElection(n raft.Node, electionTicks int) {
    for i := 0; i < electionTicks-1; i++ {
        n.Tick()
    }
}

func startNode(cfg *ServerConfig, cl *membership.RaftCluster, ids []types.ID) (id types.ID, n raft.Node, s *raft.MemoryStorage, w *wal.WAL) {
    var err error
    member := cl.MemberByName(cfg.Name)
    metadata := pbutil.MustMarshal(
        &pb.Metadata{
            NodeID:    uint64(member.ID),
            ClusterID: uint64(cl.ID()),
        },
    )
    if w, err = wal.Create(cfg.WALDir(), metadata); err != nil {
        plog.Fatalf("create wal error: %v", err)
    }
    peers := make([]raft.Peer, len(ids))
    for i, id := range ids {
        ctx, err := json.Marshal((*cl).Member(id))
        if err != nil {
            plog.Panicf("marshal member should never fail: %v", err)
        }
        peers[i] = raft.Peer{ID: uint64(id), Context: ctx}
    }
    id = member.ID
    plog.Infof("starting member %s in cluster %s", id, cl.ID())
    s = raft.NewMemoryStorage()
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
    }

    n = raft.StartNode(c, peers)
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    advanceTicksForElection(n, c.ElectionTick)
    return
}

func restartNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
    var walsnap walpb.Snapshot
    if snapshot != nil {
        walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
    }
    w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

    plog.Infof("restarting member %s in cluster %s at commit index %d", id, cid, st.Commit)
    cl := membership.NewCluster("")
    cl.SetID(cid)
    s := raft.NewMemoryStorage()
    if snapshot != nil {
        s.ApplySnapshot(*snapshot)
    }
    s.SetHardState(st)
    s.Append(ents)
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
        CheckQuorum:     true,
    }

    n := raft.RestartNode(c)
    raftStatusMu.Lock()
    raftStatus = n.Status
    raftStatusMu.Unlock()
    advanceTicksForElection(n, c.ElectionTick)
    return id, cl, n, s, w
}

func restartAsStandaloneNode(cfg *ServerConfig, snapshot *raftpb.Snapshot) (types.ID, *membership.RaftCluster, raft.Node, *raft.MemoryStorage, *wal.WAL) {
    var walsnap walpb.Snapshot
    if snapshot != nil {
        walsnap.Index, walsnap.Term = snapshot.Metadata.Index, snapshot.Metadata.Term
    }
    w, id, cid, st, ents := readWAL(cfg.WALDir(), walsnap)

    // discard the previously uncommitted entries
    for i, ent := range ents {
        if ent.Index > st.Commit {
            plog.Infof("discarding %d uncommitted WAL entries", len(ents)-i)
            ents = ents[:i]
            break
        }
    }

    // force append the configuration change entries
    toAppEnts := createConfigChangeEnts(getIDs(snapshot, ents), uint64(id), st.Term, st.Commit)
    ents = append(ents, toAppEnts...)

    // force commit newly appended entries
    err := w.Save(raftpb.HardState{}, toAppEnts)
    if err != nil {
        plog.Fatalf("%v", err)
    }
    if len(ents) != 0 {
        st.Commit = ents[len(ents)-1].Index
    }

    plog.Printf("forcing restart of member %s in cluster %s at commit index %d", id, cid, st.Commit)
    cl := membership.NewCluster("")
    cl.SetID(cid)
    s := raft.NewMemoryStorage()
    if snapshot != nil {
        s.ApplySnapshot(*snapshot)
    }
    s.SetHardState(st)
    s.Append(ents)
    c := &raft.Config{
        ID:              uint64(id),
        ElectionTick:    cfg.ElectionTicks,
        HeartbeatTick:   1,
        Storage:         s,
        MaxSizePerMsg:   maxSizePerMsg,
        MaxInflightMsgs: maxInflightMsgs,
    }
    n := raft.RestartNode(c)
    raftStatus = n.Status
    return id, cl, n, s, w
}

// getIDs returns an ordered set of IDs included in the given snapshot and
// the entries. The given snapshot/entries can contain two kinds of
// ID-related entry:
// - ConfChangeAddNode, in which case the contained ID will be added into the set.
// - ConfChangeRemoveNode, in which case the contained ID will be removed from the set.
func getIDs(snap *raftpb.Snapshot, ents []raftpb.Entry) []uint64 {
    ids := make(map[uint64]bool)
    if snap != nil {
        for _, id := range snap.Metadata.ConfState.Nodes {
            ids[id] = true
        }
    }
    for _, e := range ents {
        if e.Type != raftpb.EntryConfChange {
            continue
        }
        var cc raftpb.ConfChange
        pbutil.MustUnmarshal(&cc, e.Data)
        switch cc.Type {
        case raftpb.ConfChangeAddNode:
            ids[cc.NodeID] = true
        case raftpb.ConfChangeRemoveNode:
            delete(ids, cc.NodeID)
        case raftpb.ConfChangeUpdateNode:
            // do nothing
        default:
            plog.Panicf("ConfChange Type should be either ConfChangeAddNode or ConfChangeRemoveNode!")
        }
    }
    sids := make(types.Uint64Slice, 0, len(ids))
    for id := range ids {
        sids = append(sids, id)
    }
    sort.Sort(sids)
    return []uint64(sids)
}

// createConfigChangeEnts creates a series of Raft entries (i.e.
// EntryConfChange) to remove the set of given IDs from the cluster. The ID
// `self` is _not_ removed, even if present in the set.
// If `self` is not inside the given ids, it creates a Raft entry to add a
// default member with the given `self`.
func createConfigChangeEnts(ids []uint64, self uint64, term, index uint64) []raftpb.Entry {
    ents := make([]raftpb.Entry, 0)
    next := index + 1
    found := false
    for _, id := range ids {
        if id == self {
            found = true
            continue
        }
        cc := &raftpb.ConfChange{
            Type:   raftpb.ConfChangeRemoveNode,
            NodeID: id,
        }
        e := raftpb.Entry{
            Type:  raftpb.EntryConfChange,
            Data:  pbutil.MustMarshal(cc),
            Term:  term,
            Index: next,
        }
        ents = append(ents, e)
        next++
    }
    if !found {
        m := membership.Member{
            ID:             types.ID(self),
            RaftAttributes: membership.RaftAttributes{PeerURLs: []string{"http://localhost:2380"}},
        }
        ctx, err := json.Marshal(m)
        if err != nil {
            plog.Panicf("marshal member should never fail: %v", err)
        }
        cc := &raftpb.ConfChange{
            Type:    raftpb.ConfChangeAddNode,
            NodeID:  self,
            Context: ctx,
        }
        e := raftpb.Entry{
            Type:  raftpb.EntryConfChange,
            Data:  pbutil.MustMarshal(cc),
            Term:  term,
            Index: next,
        }
        ents = append(ents, e)
    }
    return ents
}
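getIDs above folds conf-change entries into an ordered member set. The same set semantics, sketched without raftpb so it runs standalone (the event and ID values are invented):

package main

import (
    "fmt"
    "sort"
)

// confChange is a plain stand-in for a raftpb conf-change entry:
// adds insert into the set, removes delete from it.
type confChange struct {
    add bool
    id  uint64
}

func memberIDs(initial []uint64, changes []confChange) []uint64 {
    ids := make(map[uint64]bool)
    for _, id := range initial {
        ids[id] = true
    }
    for _, cc := range changes {
        if cc.add {
            ids[cc.id] = true
        } else {
            delete(ids, cc.id)
        }
    }
    // Return the set in ascending order, as getIDs does.
    out := make([]uint64, 0, len(ids))
    for id := range ids {
        out = append(out, id)
    }
    sort.Slice(out, func(i, j int) bool { return out[i] < out[j] })
    return out
}

func main() {
    ids := memberIDs(
        []uint64{1, 2, 3}, // e.g. from the snapshot's ConfState
        []confChange{{add: true, id: 4}, {add: false, id: 2}},
    )
    fmt.Println(ids) // [1 3 4]
}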
1659
vendor/github.com/coreos/etcd/etcdserver/server.go
generated
vendored
Normal file
File diff suppressed because it is too large
73
vendor/github.com/coreos/etcd/etcdserver/snapshot_merge.go
generated
vendored
Normal file
@@ -0,0 +1,73 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "io"

    "github.com/coreos/etcd/mvcc/backend"
    "github.com/coreos/etcd/raft/raftpb"
    "github.com/coreos/etcd/snap"
)

// createMergedSnapshotMessage creates a snapshot message that contains: raft status (term, conf),
// a snapshot of the v2 store inside raft.Snapshot as []byte, and a snapshot of the v3 KV in the
// top level message as ReadCloser.
func (s *EtcdServer) createMergedSnapshotMessage(m raftpb.Message, snapt, snapi uint64, confState raftpb.ConfState) snap.Message {
    // get a snapshot of v2 store as []byte
    clone := s.store.Clone()
    d, err := clone.SaveNoCopy()
    if err != nil {
        plog.Panicf("store save should never fail: %v", err)
    }

    // commit kv to write metadata (for example: consistent index).
    s.KV().Commit()
    dbsnap := s.be.Snapshot()
    // get a snapshot of v3 KV as readCloser
    rc := newSnapshotReaderCloser(dbsnap)

    // put the []byte snapshot of store into raft snapshot and return the merged snapshot with
    // KV readCloser snapshot.
    snapshot := raftpb.Snapshot{
        Metadata: raftpb.SnapshotMetadata{
            Index:     snapi,
            Term:      snapt,
            ConfState: confState,
        },
        Data: d,
    }
    m.Snapshot = snapshot

    return *snap.NewMessage(m, rc, dbsnap.Size())
}

func newSnapshotReaderCloser(snapshot backend.Snapshot) io.ReadCloser {
    pr, pw := io.Pipe()
    go func() {
        n, err := snapshot.WriteTo(pw)
        if err == nil {
            plog.Infof("wrote database snapshot out [total bytes: %d]", n)
        } else {
            plog.Warningf("failed to write database snapshot out [written bytes: %d]: %v", n, err)
        }
        pw.CloseWithError(err)
        err = snapshot.Close()
        if err != nil {
            plog.Panicf("failed to close database snapshot: %v", err)
        }
    }()
    return pr
}
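newSnapshotReaderCloser relies on io.Pipe's error propagation: whatever error the writer passes to CloseWithError is what the reader gets back. A minimal demonstration of that contract:

package main

import (
    "fmt"
    "io"
    "io/ioutil"
)

func main() {
    pr, pw := io.Pipe()
    go func() {
        // The writer goroutine streams data into the pipe, then closes it
        // with the write error (a nil error closes the pipe cleanly).
        n, err := io.WriteString(pw, "snapshot bytes")
        fmt.Println("wrote", n, "bytes")
        pw.CloseWithError(err)
    }()
    b, err := ioutil.ReadAll(pr)
    fmt.Printf("read %q, err=%v\n", b, err)
}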
98
vendor/github.com/coreos/etcd/etcdserver/storage.go
generated
vendored
Normal file
@@ -0,0 +1,98 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
    "io"

    pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
    "github.com/coreos/etcd/pkg/pbutil"
    "github.com/coreos/etcd/pkg/types"
    "github.com/coreos/etcd/raft/raftpb"
    "github.com/coreos/etcd/snap"
    "github.com/coreos/etcd/wal"
    "github.com/coreos/etcd/wal/walpb"
)

type Storage interface {
    // Save function saves ents and state to the underlying stable storage.
    // Save MUST block until st and ents are on stable storage.
    Save(st raftpb.HardState, ents []raftpb.Entry) error
    // SaveSnap function saves snapshot to the underlying stable storage.
    SaveSnap(snap raftpb.Snapshot) error
    // Close closes the Storage and performs finalization.
    Close() error
}

type storage struct {
    *wal.WAL
    *snap.Snapshotter
}

func NewStorage(w *wal.WAL, s *snap.Snapshotter) Storage {
    return &storage{w, s}
}

// SaveSnap saves the snapshot to disk and releases the locked
// WAL files since they will not be used.
func (st *storage) SaveSnap(snap raftpb.Snapshot) error {
    walsnap := walpb.Snapshot{
        Index: snap.Metadata.Index,
        Term:  snap.Metadata.Term,
    }
    err := st.WAL.SaveSnapshot(walsnap)
    if err != nil {
        return err
    }
    err = st.Snapshotter.SaveSnap(snap)
    if err != nil {
        return err
    }
    return st.WAL.ReleaseLockTo(snap.Metadata.Index)
}

func readWAL(waldir string, snap walpb.Snapshot) (w *wal.WAL, id, cid types.ID, st raftpb.HardState, ents []raftpb.Entry) {
    var (
        err       error
        wmetadata []byte
    )

    repaired := false
    for {
        if w, err = wal.Open(waldir, snap); err != nil {
            plog.Fatalf("open wal error: %v", err)
        }
        if wmetadata, st, ents, err = w.ReadAll(); err != nil {
            w.Close()
            // we can only repair ErrUnexpectedEOF and we never repair twice.
            if repaired || err != io.ErrUnexpectedEOF {
                plog.Fatalf("read wal error (%v) and cannot be repaired", err)
            }
            if !wal.Repair(waldir) {
                plog.Fatalf("WAL error (%v) cannot be repaired", err)
            } else {
                plog.Infof("repaired WAL error (%v)", err)
                repaired = true
            }
            continue
        }
        break
    }
    var metadata pb.Metadata
    pbutil.MustUnmarshal(&metadata, wmetadata)
    id = types.ID(metadata.NodeID)
    cid = types.ID(metadata.ClusterID)
    return
}
97
vendor/github.com/coreos/etcd/etcdserver/util.go
generated
vendored
Normal file
@@ -0,0 +1,97 @@
// Copyright 2015 The etcd Authors
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package etcdserver
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/coreos/etcd/etcdserver/membership"
|
||||
"github.com/coreos/etcd/pkg/types"
|
||||
"github.com/coreos/etcd/rafthttp"
|
||||
)
|
||||
|
||||
// isConnectedToQuorumSince checks whether the local member is connected to the
// quorum of the cluster since the given time.
func isConnectedToQuorumSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
	return numConnectedSince(transport, since, self, members) >= (len(members)/2)+1
}

// isConnectedSince checks whether the local member is connected to the
// remote member since the given time.
func isConnectedSince(transport rafthttp.Transporter, since time.Time, remote types.ID) bool {
	t := transport.ActiveSince(remote)
	return !t.IsZero() && t.Before(since)
}

// isConnectedFullySince checks whether the local member is connected to all
// members in the cluster since the given time.
func isConnectedFullySince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) bool {
	return numConnectedSince(transport, since, self, members) == len(members)
}

// numConnectedSince counts how many members are connected to the local member
// since the given time.
func numConnectedSince(transport rafthttp.Transporter, since time.Time, self types.ID, members []*membership.Member) int {
	connectedNum := 0
	for _, m := range members {
		if m.ID == self || isConnectedSince(transport, since, m.ID) {
			connectedNum++
		}
	}
	return connectedNum
}

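The quorum threshold in isConnectedToQuorumSince is the usual (n/2)+1 majority. The sketch below replays the same counting logic in a self-contained program; activeSince is a hypothetical stand-in for rafthttp.Transporter.ActiveSince, assuming (as isConnectedSince does) that a zero time means no active connection.

package main

import (
	"fmt"
	"time"
)

// activeSince is a hypothetical stand-in for rafthttp.Transporter.ActiveSince;
// a zero time means no active connection.
type activeSince func(id uint64) time.Time

func numConnected(act activeSince, since time.Time, self uint64, members []uint64) int {
	n := 0
	for _, m := range members {
		if m == self {
			n++ // the local member always counts itself
			continue
		}
		if t := act(m); !t.IsZero() && t.Before(since) {
			n++
		}
	}
	return n
}

func main() {
	start := time.Now()
	conn := map[uint64]time.Time{1: start.Add(-time.Minute)} // peer 1 active since before start
	act := func(id uint64) time.Time { return conn[id] }     // missing peers yield the zero time
	members := []uint64{1, 2, 3}                             // the local member is 3
	n := numConnected(act, start, 3, members)
	fmt.Println(n, n >= len(members)/2+1) // 2 true: two of three is a quorum
}
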
// longestConnected chooses the member with the longest active-since time.
// It returns false if nothing is active.
func longestConnected(tp rafthttp.Transporter, membs []types.ID) (types.ID, bool) {
	var longest types.ID
	var oldest time.Time
	for _, id := range membs {
		tm := tp.ActiveSince(id)
		if tm.IsZero() { // inactive
			continue
		}

		if oldest.IsZero() { // first longest candidate
			oldest = tm
			longest = id
		}

		if tm.Before(oldest) {
			oldest = tm
			longest = id
		}
	}
	if uint64(longest) == 0 {
		return longest, false
	}
	return longest, true
}

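A quick self-contained check of the same selection rule over a plain map of hypothetical peer IDs: skip zero (inactive) times and keep the earliest ActiveSince.

package main

import (
	"fmt"
	"time"
)

func main() {
	now := time.Now()
	active := map[string]time.Time{
		"a": now.Add(-3 * time.Minute),
		"b": now.Add(-9 * time.Minute), // earliest active-since → longest connected
		"c": {},                        // zero time: inactive, skipped
	}
	var longest string
	var oldest time.Time
	for id, t := range active {
		if t.IsZero() {
			continue
		}
		if oldest.IsZero() || t.Before(oldest) {
			oldest = t
			longest = id
		}
	}
	fmt.Println(longest) // b
}
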
type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier {
	return &notifier{
		c: make(chan struct{}),
	}
}

func (nc *notifier) notify(err error) {
	nc.err = err
	close(nc.c)
}

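notifier is a one-shot broadcast: notify records the result and closes the channel, waking every waiter, and Go's happens-before guarantee for channel close makes the err read race-free. A runnable demo of the pattern (the error value is made up):

package main

import (
	"errors"
	"fmt"
	"sync"
)

type notifier struct {
	c   chan struct{}
	err error
}

func newNotifier() *notifier { return &notifier{c: make(chan struct{})} }

// notify publishes err to every waiter; the write to n.err happens-before
// any receive that observes the close.
func (n *notifier) notify(err error) { n.err = err; close(n.c) }

func main() {
	n := newNotifier()
	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			<-n.c // blocks until notify closes the channel
			fmt.Println("woke with:", n.err)
		}()
	}
	n.notify(errors.New("request timed out")) // made-up error value
	wg.Wait()
}
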
125
vendor/github.com/coreos/etcd/etcdserver/v2_server.go
generated
vendored
Normal file
@@ -0,0 +1,125 @@
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"time"

	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"golang.org/x/net/context"
)

type v2API interface {
	Post(ctx context.Context, r *pb.Request) (Response, error)
	Put(ctx context.Context, r *pb.Request) (Response, error)
	Delete(ctx context.Context, r *pb.Request) (Response, error)
	QGet(ctx context.Context, r *pb.Request) (Response, error)
	Get(ctx context.Context, r *pb.Request) (Response, error)
	Head(ctx context.Context, r *pb.Request) (Response, error)
}

type v2apiStore struct{ s *EtcdServer }

func (a *v2apiStore) Post(ctx context.Context, r *pb.Request) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) Put(ctx context.Context, r *pb.Request) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) Delete(ctx context.Context, r *pb.Request) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) QGet(ctx context.Context, r *pb.Request) (Response, error) {
	return a.processRaftRequest(ctx, r)
}

func (a *v2apiStore) processRaftRequest(ctx context.Context, r *pb.Request) (Response, error) {
	data, err := r.Marshal()
	if err != nil {
		return Response{}, err
	}
	ch := a.s.w.Register(r.ID)

	start := time.Now()
	a.s.r.Propose(ctx, data)
	proposalsPending.Inc()
	defer proposalsPending.Dec()

	select {
	case x := <-ch:
		resp := x.(Response)
		return resp, resp.err
	case <-ctx.Done():
		proposalsFailed.Inc()
		a.s.w.Trigger(r.ID, nil) // GC wait
		return Response{}, a.s.parseProposeCtxErr(ctx.Err(), start)
	case <-a.s.stopping:
	}
	return Response{}, ErrStopped
}

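processRaftRequest leans on the server's wait registry (a.s.w): register the request ID before proposing, then block until the apply loop triggers that ID with the committed Response. The sketch below is a cut-down, hypothetical registry showing the Register/Trigger handshake, including the nil Trigger used to garbage-collect an abandoned wait; it is not the actual pkg/wait implementation.

package main

import (
	"context"
	"fmt"
	"sync"
	"time"
)

// wait parks a channel under a request ID until some other goroutine
// fulfils it with Trigger.
type wait struct {
	mu sync.Mutex
	m  map[uint64]chan interface{}
}

func newWait() *wait { return &wait{m: make(map[uint64]chan interface{})} }

func (w *wait) Register(id uint64) <-chan interface{} {
	w.mu.Lock()
	defer w.mu.Unlock()
	ch := make(chan interface{}, 1)
	w.m[id] = ch
	return ch
}

func (w *wait) Trigger(id uint64, x interface{}) {
	w.mu.Lock()
	ch := w.m[id]
	delete(w.m, id)
	w.mu.Unlock()
	if ch != nil {
		ch <- x
		close(ch)
	}
}

func main() {
	w := newWait()
	ch := w.Register(42)
	go func() { time.Sleep(10 * time.Millisecond); w.Trigger(42, "applied") }()
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	select {
	case x := <-ch:
		fmt.Println(x) // applied
	case <-ctx.Done():
		w.Trigger(42, nil) // GC the registration, as the server does on timeout
	}
}
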
func (a *v2apiStore) Get(ctx context.Context, r *pb.Request) (Response, error) {
	if r.Wait {
		wc, err := a.s.store.Watch(r.Path, r.Recursive, r.Stream, r.Since)
		if err != nil {
			return Response{}, err
		}
		return Response{Watcher: wc}, nil
	}
	ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted)
	if err != nil {
		return Response{}, err
	}
	return Response{Event: ev}, nil
}

func (a *v2apiStore) Head(ctx context.Context, r *pb.Request) (Response, error) {
	ev, err := a.s.store.Get(r.Path, r.Recursive, r.Sorted)
	if err != nil {
		return Response{}, err
	}
	return Response{Event: ev}, nil
}

// Do interprets r and performs an operation on s.store according to r.Method
// and other fields. If r.Method is "POST", "PUT", "DELETE", or a "GET" with
// Quorum == true, r will be sent through consensus before performing its
// respective operation. Do will block until an action is performed or there is
// an error.
func (s *EtcdServer) Do(ctx context.Context, r pb.Request) (Response, error) {
	r.ID = s.reqIDGen.Next()
	if r.Method == "GET" && r.Quorum {
		r.Method = "QGET"
	}
	v2api := (v2API)(&v2apiStore{s})
	switch r.Method {
	case "POST":
		return v2api.Post(ctx, &r)
	case "PUT":
		return v2api.Put(ctx, &r)
	case "DELETE":
		return v2api.Delete(ctx, &r)
	case "QGET":
		return v2api.QGet(ctx, &r)
	case "GET":
		return v2api.Get(ctx, &r)
	case "HEAD":
		return v2api.Head(ctx, &r)
	}
	return Response{}, ErrUnknownMethod
}

692
vendor/github.com/coreos/etcd/etcdserver/v3_server.go
generated
vendored
Normal file
@@ -0,0 +1,692 @@
// Copyright 2015 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdserver

import (
	"bytes"
	"encoding/binary"
	"time"

	"github.com/gogo/protobuf/proto"

	"github.com/coreos/etcd/auth"
	pb "github.com/coreos/etcd/etcdserver/etcdserverpb"
	"github.com/coreos/etcd/etcdserver/membership"
	"github.com/coreos/etcd/lease"
	"github.com/coreos/etcd/lease/leasehttp"
	"github.com/coreos/etcd/mvcc"
	"github.com/coreos/etcd/raft"

	"golang.org/x/net/context"
)

const (
	// maxRequestBytes is the max request size that raft accepts.
	// TODO: make this a flag? We probably do not want to accept a large
	// request that might block the raft stream; a user-specified large
	// value could end up shooting them in the foot.
	maxRequestBytes = 1.5 * 1024 * 1024

	// In the healthy case, there might be a small gap (tens of entries)
	// between the applied index and the committed index. However, if the
	// committed entries are very heavy to apply, the gap can grow. We
	// should stop accepting new proposals if the gap grows past a certain
	// point.
	maxGapBetweenApplyAndCommitIndex = 5000
)

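maxGapBetweenApplyAndCommitIndex becomes a simple backpressure check in processInternalRaftRequestOnce further down: refuse new proposals while the committed index has outrun the applied index by more than the gap. A trivial sketch of that check:

package main

import "fmt"

const maxGap = 5000

// acceptProposal is the backpressure predicate: allow a proposal only while
// committed has not outrun applied by more than the configured gap.
func acceptProposal(applied, committed uint64) bool {
	return committed <= applied+maxGap
}

func main() {
	fmt.Println(acceptProposal(100, 4000))  // true: the apply loop is keeping up
	fmt.Println(acceptProposal(100, 10000)) // false: reject with ErrTooManyRequests
}
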
type RaftKV interface {
	Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error)
	Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error)
	DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error)
	Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error)
	Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error)
}

type Lessor interface {
	// LeaseGrant sends a LeaseGrant request to raft and applies it once committed.
	LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error)
	// LeaseRevoke sends a LeaseRevoke request to raft and applies it once committed.
	LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error)

	// LeaseRenew renews the lease with the given ID. The renewed TTL is
	// returned, or an error.
	LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error)

	// LeaseTimeToLive retrieves lease information.
	LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error)
}

type Authenticator interface {
	AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error)
	AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error)
	Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error)
	UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error)
	UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error)
	UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error)
	UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error)
	UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error)
	UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error)
	RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error)
	RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error)
	RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error)
	RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error)
	RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error)
	UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error)
	RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error)
}

func (s *EtcdServer) Range(ctx context.Context, r *pb.RangeRequest) (*pb.RangeResponse, error) {
	if !r.Serializable {
		err := s.linearizableReadNotify(ctx)
		if err != nil {
			return nil, err
		}
	}
	var resp *pb.RangeResponse
	var err error
	chk := func(ai *auth.AuthInfo) error {
		return s.authStore.IsRangePermitted(ai, r.Key, r.RangeEnd)
	}
	get := func() { resp, err = s.applyV3Base.Range(nil, r) }
	if serr := s.doSerialize(ctx, chk, get); serr != nil {
		return nil, serr
	}
	return resp, err
}

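Range shows the shape of every v3 read path: serializable reads are served straight from local applied state, while linearizable reads first block on a ReadIndex round via linearizableReadNotify (defined near the end of this file). A shape-only sketch under that reading, with hypothetical stand-ins for the notify step and the local read:

package main

import (
	"context"
	"fmt"
)

// rangeRead mirrors the control flow of Range above: linearizable reads wait
// for one ReadIndex round, then both paths serve from local applied state.
func rangeRead(ctx context.Context, serializable bool, notify func(context.Context) error, local func() string) (string, error) {
	if !serializable {
		if err := notify(ctx); err != nil {
			return "", err
		}
	}
	return local(), nil
}

func main() {
	notify := func(context.Context) error { return nil } // pretend the quorum round succeeded
	v, err := rangeRead(context.Background(), false, notify, func() string { return "value@rev7" })
	fmt.Println(v, err) // value@rev7 <nil>
}
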
func (s *EtcdServer) Put(ctx context.Context, r *pb.PutRequest) (*pb.PutResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Put: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.PutResponse), nil
}

func (s *EtcdServer) DeleteRange(ctx context.Context, r *pb.DeleteRangeRequest) (*pb.DeleteRangeResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{DeleteRange: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.DeleteRangeResponse), nil
}

func (s *EtcdServer) Txn(ctx context.Context, r *pb.TxnRequest) (*pb.TxnResponse, error) {
	if isTxnReadonly(r) {
		if !isTxnSerializable(r) {
			err := s.linearizableReadNotify(ctx)
			if err != nil {
				return nil, err
			}
		}
		var resp *pb.TxnResponse
		var err error
		chk := func(ai *auth.AuthInfo) error {
			return checkTxnAuth(s.authStore, ai, r)
		}
		get := func() { resp, err = s.applyV3Base.Txn(r) }
		if serr := s.doSerialize(ctx, chk, get); serr != nil {
			return nil, serr
		}
		return resp, err
	}
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{Txn: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.TxnResponse), nil
}

func isTxnSerializable(r *pb.TxnRequest) bool {
	for _, u := range r.Success {
		if r := u.GetRequestRange(); r == nil || !r.Serializable {
			return false
		}
	}
	for _, u := range r.Failure {
		if r := u.GetRequestRange(); r == nil || !r.Serializable {
			return false
		}
	}
	return true
}

func isTxnReadonly(r *pb.TxnRequest) bool {
	for _, u := range r.Success {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	for _, u := range r.Failure {
		if r := u.GetRequestRange(); r == nil {
			return false
		}
	}
	return true
}

func (s *EtcdServer) Compact(ctx context.Context, r *pb.CompactionRequest) (*pb.CompactionResponse, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, pb.InternalRaftRequest{Compaction: r})
	if r.Physical && result != nil && result.physc != nil {
		<-result.physc
		// The compaction is done deleting keys; the hash is now settled
		// but the data is not necessarily committed. If there's a crash,
		// the hash may revert to a hash prior to compaction completing
		// if the compaction resumes. Force the finished compaction to
		// commit so it won't resume following a crash.
		s.be.ForceCommit()
	}
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	resp := result.resp.(*pb.CompactionResponse)
	if resp == nil {
		resp = &pb.CompactionResponse{}
	}
	if resp.Header == nil {
		resp.Header = &pb.ResponseHeader{}
	}
	resp.Header.Revision = s.kv.Rev()
	return resp, nil
}

func (s *EtcdServer) LeaseGrant(ctx context.Context, r *pb.LeaseGrantRequest) (*pb.LeaseGrantResponse, error) {
	// no id given? choose one
	for r.ID == int64(lease.NoLease) {
		// only use positive int64 ids
		r.ID = int64(s.reqIDGen.Next() & ((1 << 63) - 1))
	}
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseGrant: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.LeaseGrantResponse), nil
}

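The mask (1<<63)-1 in the ID loop clears the sign bit, so a random uint64 always converts to a non-negative int64; the loop also re-rolls the one excluded value, lease.NoLease. A quick check of the masking trick:

package main

import (
	"fmt"
	"math/rand"
)

func main() {
	r := rand.New(rand.NewSource(1)) // deterministic for the demo
	for i := 0; i < 5; i++ {
		id := int64(r.Uint64() & ((1 << 63) - 1)) // clear the sign bit
		fmt.Println(id >= 0)                      // always true
	}
}
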
func (s *EtcdServer) LeaseRevoke(ctx context.Context, r *pb.LeaseRevokeRequest) (*pb.LeaseRevokeResponse, error) {
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{LeaseRevoke: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.LeaseRevokeResponse), nil
}

func (s *EtcdServer) LeaseRenew(ctx context.Context, id lease.LeaseID) (int64, error) {
	ttl, err := s.lessor.Renew(id)
	if err == nil { // already requested to primary lessor (leader)
		return ttl, nil
	}
	if err != lease.ErrNotPrimary {
		return -1, err
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// renewals don't go through raft; forward to leader manually
	for cctx.Err() == nil && err != nil {
		leader, lerr := s.waitLeader(cctx)
		if lerr != nil {
			return -1, lerr
		}
		for _, url := range leader.PeerURLs {
			lurl := url + leasehttp.LeasePrefix
			ttl, err = leasehttp.RenewHTTP(cctx, id, lurl, s.peerRt)
			if err == nil || err == lease.ErrLeaseNotFound {
				return ttl, err
			}
		}
	}
	return -1, ErrTimeout
}

func (s *EtcdServer) LeaseTimeToLive(ctx context.Context, r *pb.LeaseTimeToLiveRequest) (*pb.LeaseTimeToLiveResponse, error) {
	if s.Leader() == s.ID() {
		// primary; timetolive directly from leader
		le := s.lessor.Lookup(lease.LeaseID(r.ID))
		if le == nil {
			return nil, lease.ErrLeaseNotFound
		}
		// TODO: fill out ResponseHeader
		resp := &pb.LeaseTimeToLiveResponse{Header: &pb.ResponseHeader{}, ID: r.ID, TTL: int64(le.Remaining().Seconds()), GrantedTTL: le.TTL()}
		if r.Keys {
			ks := le.Keys()
			kbs := make([][]byte, len(ks))
			for i := range ks {
				kbs[i] = []byte(ks[i])
			}
			resp.Keys = kbs
		}
		return resp, nil
	}

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	// forward to leader
	for cctx.Err() == nil {
		leader, err := s.waitLeader(cctx)
		if err != nil {
			return nil, err
		}
		for _, url := range leader.PeerURLs {
			lurl := url + leasehttp.LeaseInternalPrefix
			resp, err := leasehttp.TimeToLiveHTTP(cctx, lease.LeaseID(r.ID), r.Keys, lurl, s.peerRt)
			if err == nil {
				return resp.LeaseTimeToLiveResponse, nil
			}
			if err == lease.ErrLeaseNotFound {
				return nil, err
			}
		}
	}
	return nil, ErrTimeout
}

func (s *EtcdServer) waitLeader(ctx context.Context) (*membership.Member, error) {
	leader := s.cluster.Member(s.Leader())
	for leader == nil {
		// wait an election
		dur := time.Duration(s.Cfg.ElectionTicks) * time.Duration(s.Cfg.TickMs) * time.Millisecond
		select {
		case <-time.After(dur):
			leader = s.cluster.Member(s.Leader())
		case <-s.stopping:
			return nil, ErrStopped
		case <-ctx.Done():
			return nil, ErrNoLeader
		}
	}
	if leader == nil || len(leader.PeerURLs) == 0 {
		return nil, ErrNoLeader
	}
	return leader, nil
}

func (s *EtcdServer) Alarm(ctx context.Context, r *pb.AlarmRequest) (*pb.AlarmResponse, error) {
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{Alarm: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AlarmResponse), nil
}

func (s *EtcdServer) AuthEnable(ctx context.Context, r *pb.AuthEnableRequest) (*pb.AuthEnableResponse, error) {
	resp, err := s.raftRequestOnce(ctx, pb.InternalRaftRequest{AuthEnable: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthEnableResponse), nil
}

func (s *EtcdServer) AuthDisable(ctx context.Context, r *pb.AuthDisableRequest) (*pb.AuthDisableResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthDisable: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthDisableResponse), nil
}

func (s *EtcdServer) Authenticate(ctx context.Context, r *pb.AuthenticateRequest) (*pb.AuthenticateResponse, error) {
	if err := s.linearizableReadNotify(ctx); err != nil {
		return nil, err
	}

	var resp proto.Message
	for {
		checkedRevision, err := s.AuthStore().CheckPassword(r.Name, r.Password)
		if err != nil {
			if err != auth.ErrAuthNotEnabled {
				plog.Errorf("invalid authentication request to user %s was issued", r.Name)
			}
			return nil, err
		}

		st, err := s.AuthStore().GenTokenPrefix()
		if err != nil {
			return nil, err
		}

		internalReq := &pb.InternalAuthenticateRequest{
			Name:        r.Name,
			Password:    r.Password,
			SimpleToken: st,
		}

		resp, err = s.raftRequestOnce(ctx, pb.InternalRaftRequest{Authenticate: internalReq})
		if err != nil {
			return nil, err
		}
		if checkedRevision == s.AuthStore().Revision() {
			break
		}
		plog.Infof("revision when password checked is obsolete, retrying")
	}

	return resp.(*pb.AuthenticateResponse), nil
}

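Authenticate is an optimistic-concurrency loop: the password check happens outside raft, the token grant goes through raft, and the whole operation retries if the auth store's revision moved in between. The toy store below reproduces just that skeleton; all names are hypothetical.

package main

import "fmt"

type store struct{ rev int }

func (s *store) checkPassword() int { return s.rev } // revision seen at check time
func (s *store) commitGrant()       {}               // would go through raft
func (s *store) revision() int      { return s.rev }

func main() {
	s := &store{rev: 7}
	for attempt := 1; ; attempt++ {
		seen := s.checkPassword()
		if attempt == 1 {
			s.rev = 8 // a concurrent auth change lands mid-flight
		}
		s.commitGrant()
		if seen == s.revision() {
			fmt.Println("authenticated on attempt", attempt) // attempt 2
			return
		}
		// revision seen at password check is obsolete; retry
	}
}
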
func (s *EtcdServer) UserAdd(ctx context.Context, r *pb.AuthUserAddRequest) (*pb.AuthUserAddResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserAdd: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserAddResponse), nil
}

func (s *EtcdServer) UserDelete(ctx context.Context, r *pb.AuthUserDeleteRequest) (*pb.AuthUserDeleteResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserDelete: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserDeleteResponse), nil
}

func (s *EtcdServer) UserChangePassword(ctx context.Context, r *pb.AuthUserChangePasswordRequest) (*pb.AuthUserChangePasswordResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserChangePassword: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserChangePasswordResponse), nil
}

func (s *EtcdServer) UserGrantRole(ctx context.Context, r *pb.AuthUserGrantRoleRequest) (*pb.AuthUserGrantRoleResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGrantRole: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserGrantRoleResponse), nil
}

func (s *EtcdServer) UserGet(ctx context.Context, r *pb.AuthUserGetRequest) (*pb.AuthUserGetResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserGet: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserGetResponse), nil
}

func (s *EtcdServer) UserList(ctx context.Context, r *pb.AuthUserListRequest) (*pb.AuthUserListResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserList: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserListResponse), nil
}

func (s *EtcdServer) UserRevokeRole(ctx context.Context, r *pb.AuthUserRevokeRoleRequest) (*pb.AuthUserRevokeRoleResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthUserRevokeRole: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthUserRevokeRoleResponse), nil
}

func (s *EtcdServer) RoleAdd(ctx context.Context, r *pb.AuthRoleAddRequest) (*pb.AuthRoleAddResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleAdd: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleAddResponse), nil
}

func (s *EtcdServer) RoleGrantPermission(ctx context.Context, r *pb.AuthRoleGrantPermissionRequest) (*pb.AuthRoleGrantPermissionResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGrantPermission: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleGrantPermissionResponse), nil
}

func (s *EtcdServer) RoleGet(ctx context.Context, r *pb.AuthRoleGetRequest) (*pb.AuthRoleGetResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleGet: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleGetResponse), nil
}

func (s *EtcdServer) RoleList(ctx context.Context, r *pb.AuthRoleListRequest) (*pb.AuthRoleListResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleList: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleListResponse), nil
}

func (s *EtcdServer) RoleRevokePermission(ctx context.Context, r *pb.AuthRoleRevokePermissionRequest) (*pb.AuthRoleRevokePermissionResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleRevokePermission: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleRevokePermissionResponse), nil
}

func (s *EtcdServer) RoleDelete(ctx context.Context, r *pb.AuthRoleDeleteRequest) (*pb.AuthRoleDeleteResponse, error) {
	resp, err := s.raftRequest(ctx, pb.InternalRaftRequest{AuthRoleDelete: r})
	if err != nil {
		return nil, err
	}
	return resp.(*pb.AuthRoleDeleteResponse), nil
}

func (s *EtcdServer) raftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	result, err := s.processInternalRaftRequestOnce(ctx, r)
	if err != nil {
		return nil, err
	}
	if result.err != nil {
		return nil, result.err
	}
	return result.resp, nil
}

func (s *EtcdServer) raftRequest(ctx context.Context, r pb.InternalRaftRequest) (proto.Message, error) {
	for {
		resp, err := s.raftRequestOnce(ctx, r)
		if err != auth.ErrAuthOldRevision {
			return resp, err
		}
	}
}

// doSerialize handles the auth logic, with permissions checked by "chk", for
// a serialized request "get". Returns a non-nil error on authentication
// failure.
func (s *EtcdServer) doSerialize(ctx context.Context, chk func(*auth.AuthInfo) error, get func()) error {
	for {
		ai, err := s.AuthInfoFromCtx(ctx)
		if err != nil {
			return err
		}
		if ai == nil {
			// chk expects non-nil AuthInfo; use empty credentials
			ai = &auth.AuthInfo{}
		}
		if err = chk(ai); err != nil {
			if err == auth.ErrAuthOldRevision {
				continue
			}
			return err
		}
		// fetch response for serialized request
		get()
		// empty credentials or a current auth revision means no retry is needed
		if ai.Revision == 0 || ai.Revision == s.authStore.Revision() {
			return nil
		}
		// the auth revision moved under us; retry to avoid a TOCTOU error
	}
}

func (s *EtcdServer) processInternalRaftRequestOnce(ctx context.Context, r pb.InternalRaftRequest) (*applyResult, error) {
	ai := s.getAppliedIndex()
	ci := s.getCommittedIndex()
	if ci > ai+maxGapBetweenApplyAndCommitIndex {
		return nil, ErrTooManyRequests
	}

	r.Header = &pb.RequestHeader{
		ID: s.reqIDGen.Next(),
	}

	authInfo, err := s.AuthInfoFromCtx(ctx)
	if err != nil {
		return nil, err
	}
	if authInfo != nil {
		r.Header.Username = authInfo.Username
		r.Header.AuthRevision = authInfo.Revision
	}

	data, err := r.Marshal()
	if err != nil {
		return nil, err
	}

	if len(data) > maxRequestBytes {
		return nil, ErrRequestTooLarge
	}

	id := r.ID
	if id == 0 {
		id = r.Header.ID
	}
	ch := s.w.Register(id)

	cctx, cancel := context.WithTimeout(ctx, s.Cfg.ReqTimeout())
	defer cancel()

	start := time.Now()
	s.r.Propose(cctx, data)
	proposalsPending.Inc()
	defer proposalsPending.Dec()

	select {
	case x := <-ch:
		return x.(*applyResult), nil
	case <-cctx.Done():
		proposalsFailed.Inc()
		s.w.Trigger(id, nil) // GC wait
		return nil, s.parseProposeCtxErr(cctx.Err(), start)
	case <-s.done:
		return nil, ErrStopped
	}
}

// Watchable returns a watchable interface attached to the etcdserver.
func (s *EtcdServer) Watchable() mvcc.WatchableKV { return s.KV() }

func (s *EtcdServer) linearizableReadLoop() {
	var rs raft.ReadState

	for {
		ctx := make([]byte, 8)
		binary.BigEndian.PutUint64(ctx, s.reqIDGen.Next())

		select {
		case <-s.readwaitc:
		case <-s.stopping:
			return
		}

		nextnr := newNotifier()

		s.readMu.Lock()
		nr := s.readNotifier
		s.readNotifier = nextnr
		s.readMu.Unlock()

		cctx, cancel := context.WithTimeout(context.Background(), s.Cfg.ReqTimeout())
		if err := s.r.ReadIndex(cctx, ctx); err != nil {
			cancel()
			if err == raft.ErrStopped {
				return
			}
			plog.Errorf("failed to get read index from raft: %v", err)
			nr.notify(err)
			continue
		}
		cancel()

		var (
			timeout bool
			done    bool
		)
		for !timeout && !done {
			select {
			case rs = <-s.r.readStateC:
				done = bytes.Equal(rs.RequestCtx, ctx)
				if !done {
					// a previous request might have timed out; ignore its
					// response and keep waiting for the current request's.
					plog.Warningf("ignored out-of-date read index response (want %v, got %v)", rs.RequestCtx, ctx)
				}
			case <-time.After(s.Cfg.ReqTimeout()):
				plog.Warningf("timed out waiting for read index response")
				nr.notify(ErrTimeout)
				timeout = true
			case <-s.stopping:
				return
			}
		}
		if !done {
			continue
		}

		if ai := s.getAppliedIndex(); ai < rs.Index {
			select {
			case <-s.applyWait.Wait(rs.Index):
			case <-s.stopping:
				return
			}
		}
		// unblock all linearizable reads requested at indices before rs.Index
		nr.notify(nil)
	}
}

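Each ReadIndex round is tagged with an 8-byte big-endian request ID, and the loop discards any read state whose RequestCtx does not match the ID currently awaited. A tiny reproduction of that matching:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

func main() {
	want := make([]byte, 8)
	binary.BigEndian.PutUint64(want, 42) // the round we are waiting on

	stale := make([]byte, 8)
	binary.BigEndian.PutUint64(stale, 41) // a response from an earlier, timed-out round

	for _, got := range [][]byte{stale, want} {
		if bytes.Equal(got, want) {
			fmt.Println("served read at id", binary.BigEndian.Uint64(got))
		} else {
			fmt.Println("ignored out-of-date response", binary.BigEndian.Uint64(got))
		}
	}
}
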
func (s *EtcdServer) linearizableReadNotify(ctx context.Context) error {
	s.readMu.RLock()
	nc := s.readNotifier
	s.readMu.RUnlock()

	// signal linearizable loop for current notify if it hasn't been already
	select {
	case s.readwaitc <- struct{}{}:
	default:
	}

	// wait for read state notification
	select {
	case <-nc.c:
		return nc.err
	case <-ctx.Done():
		return ctx.Err()
	case <-s.done:
		return ErrStopped
	}
}

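The non-blocking send on s.readwaitc is what coalesces concurrent readers: if a wakeup is already pending they all wait on the shared notifier rather than scheduling extra ReadIndex rounds. A minimal demo, assuming (as the server's construction appears to do) a one-slot buffered channel:

package main

import "fmt"

func main() {
	readwaitc := make(chan struct{}, 1) // assumed one-slot buffer
	wake := func() {
		select {
		case readwaitc <- struct{}{}:
		default: // a wakeup is already pending; piggyback on that round
		}
	}
	for i := 0; i < 3; i++ {
		wake() // three concurrent readers...
	}
	fmt.Println(len(readwaitc)) // ...one pending wakeup: 1
}
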
func (s *EtcdServer) AuthInfoFromCtx(ctx context.Context) (*auth.AuthInfo, error) {
	if s.Cfg.ClientCertAuthEnabled {
		authInfo := s.AuthStore().AuthInfoFromTLS(ctx)
		if authInfo != nil {
			return authInfo, nil
		}
	}

	return s.AuthStore().AuthInfoFromCtx(ctx)
}