Vendor main dependencies.

parent 49a09ab7dd
commit dd5e3fba01

2738 changed files with 1045689 additions and 0 deletions
78  vendor/github.com/donovanhide/eventsource/decoder.go  (generated, vendored, normal file)
@@ -0,0 +1,78 @@
package eventsource

import (
	"bufio"
	"io"
	"strconv"
	"strings"
)

type publication struct {
	id, event, data string
	retry           int64
}

func (s *publication) Id() string    { return s.id }
func (s *publication) Event() string { return s.event }
func (s *publication) Data() string  { return s.data }
func (s *publication) Retry() int64  { return s.retry }

// A Decoder is capable of reading Events from a stream.
type Decoder struct {
	*bufio.Reader
}

// NewDecoder returns a new Decoder instance that reads events
// with the given io.Reader.
func NewDecoder(r io.Reader) *Decoder {
	dec := &Decoder{bufio.NewReader(newNormaliser(r))}
	return dec
}

// Decode reads the next Event from a stream (and will block until one
// comes in).
// Graceful disconnects (between events) are indicated by an io.EOF error.
// Any error occurring mid-event is considered non-graceful and will
// show up as some other error (most likely io.ErrUnexpectedEOF).
func (dec *Decoder) Decode() (Event, error) {

	// peek ahead before we start a new event so we can return EOFs
	_, err := dec.Peek(1)
	if err == io.ErrUnexpectedEOF {
		err = io.EOF
	}
	if err != nil {
		return nil, err
	}
	pub := new(publication)
	for {
		line, err := dec.ReadString('\n')
		if err != nil {
			return nil, err
		}
		if line == "\n" {
			break
		}
		line = strings.TrimSuffix(line, "\n")
		if strings.HasPrefix(line, ":") {
			continue
		}
		sections := strings.SplitN(line, ":", 2)
		field, value := sections[0], ""
		if len(sections) == 2 {
			value = strings.TrimPrefix(sections[1], " ")
		}
		switch field {
		case "event":
			pub.event = value
		case "data":
			pub.data += value + "\n"
		case "id":
			pub.id = value
		case "retry":
			pub.retry, _ = strconv.ParseInt(value, 10, 64)
		}
	}
	pub.data = strings.TrimSuffix(pub.data, "\n")
	return pub, nil
}
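Not part of the vendored file: a minimal sketch of this Decoder reading a canned text/event-stream payload; the payload string is invented for illustration.

package main

import (
	"fmt"
	"io"
	"log"
	"strings"

	"github.com/donovanhide/eventsource"
)

func main() {
	// A canned SSE payload stands in for a real response body.
	body := strings.NewReader("id: 1\nevent: ping\ndata: hello\n\n")
	dec := eventsource.NewDecoder(body)
	for {
		ev, err := dec.Decode()
		if err == io.EOF {
			break // graceful end of stream between events
		}
		if err != nil {
			log.Fatal(err) // mid-event errors surface as e.g. io.ErrUnexpectedEOF
		}
		fmt.Printf("id=%s event=%s data=%q\n", ev.Id(), ev.Event(), ev.Data())
	}
}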
58  vendor/github.com/donovanhide/eventsource/encoder.go  (generated, vendored, normal file)
@@ -0,0 +1,58 @@
package eventsource

import (
	"compress/gzip"
	"fmt"
	"io"
	"strings"
)

var (
	encFields = []struct {
		prefix string
		value  func(Event) string
	}{
		{"id: ", Event.Id},
		{"event: ", Event.Event},
		{"data: ", Event.Data},
	}
)

// An Encoder is capable of writing Events to a stream. Optionally
// Events can be gzip compressed in this process.
type Encoder struct {
	w          io.Writer
	compressed bool
}

// NewEncoder returns an Encoder for a given io.Writer.
// When compressed is set to true, a gzip writer will be
// created.
func NewEncoder(w io.Writer, compressed bool) *Encoder {
	if compressed {
		return &Encoder{w: gzip.NewWriter(w), compressed: true}
	}
	return &Encoder{w: w}
}

// Encode writes an event in the format specified by the
// server-sent events protocol.
func (enc *Encoder) Encode(ev Event) error {
	for _, field := range encFields {
		prefix, value := field.prefix, field.value(ev)
		if len(value) == 0 {
			continue
		}
		value = strings.Replace(value, "\n", "\n"+prefix, -1)
		if _, err := io.WriteString(enc.w, prefix+value+"\n"); err != nil {
			return fmt.Errorf("eventsource encode: %v", err)
		}
	}
	if _, err := io.WriteString(enc.w, "\n"); err != nil {
		return fmt.Errorf("eventsource encode: %v", err)
	}
	if enc.compressed {
		return enc.w.(*gzip.Writer).Flush()
	}
	return nil
}
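Not part of the vendored file: a minimal sketch of the Encoder writing one event to stdout; the msg type is a throwaway Event implementation made up for the example.

package main

import (
	"log"
	"os"

	"github.com/donovanhide/eventsource"
)

// msg is a throwaway Event implementation for the sketch.
type msg struct{ id, event, data string }

func (m msg) Id() string    { return m.id }
func (m msg) Event() string { return m.event }
func (m msg) Data() string  { return m.data }

func main() {
	// Write one uncompressed event to stdout in SSE wire format.
	// Multi-line data is split into repeated "data:" lines by Encode.
	enc := eventsource.NewEncoder(os.Stdout, false)
	if err := enc.Encode(msg{id: "1", event: "ping", data: "hello\nworld"}); err != nil {
		log.Fatal(err)
	}
}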
25  vendor/github.com/donovanhide/eventsource/interface.go  (generated, vendored, normal file)
@@ -0,0 +1,25 @@
// Package eventsource implements a client and server to allow streaming data one-way over an HTTP connection
// using the Server-Sent Events API http://dev.w3.org/html5/eventsource/
//
// The client and server respect the Last-Event-ID header.
// If the Repository interface is implemented on the server, events can be replayed in case of a network disconnection.
package eventsource

// Any event received by the client or sent by the server will implement this interface
type Event interface {
	// Id is an identifier that can be used to allow a client to replay
	// missed Events by returning the Last-Event-Id header.
	// Return empty string if not required.
	Id() string
	// The name of the event. Return empty string if not required.
	Event() string
	// The payload of the event.
	Data() string
}

// If history is required, this interface will allow clients to replay previous events through the server.
// Both methods can be called from different goroutines concurrently, so you must make sure they are goroutine safe.
type Repository interface {
	// Gets the Events which should follow on from the specified channel and event id.
	Replay(channel, id string) chan Event
}
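Not part of the vendored file: a sketch of a custom Repository satisfying the interface above; staticRepo, its package name, and the resume rule are assumptions for illustration.

package example

import "github.com/donovanhide/eventsource"

// staticRepo is a hypothetical Repository backed by a fixed, id-ordered
// history; it replays the same events for every channel.
type staticRepo struct {
	history []eventsource.Event
}

func (r staticRepo) Replay(channel, id string) chan eventsource.Event {
	out := make(chan eventsource.Event)
	go func() {
		defer close(out)
		for _, ev := range r.history {
			// Resume strictly after the client's Last-Event-ID.
			if ev.Id() > id {
				out <- ev
			}
		}
	}()
	return out
}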
35  vendor/github.com/donovanhide/eventsource/normalise.go  (generated, vendored, normal file)
@@ -0,0 +1,35 @@
package eventsource

import (
	"io"
)

// A reader which normalises line endings:
// "\r" and "\r\n" are converted to "\n"
type normaliser struct {
	r        io.Reader
	lastChar byte
}

func newNormaliser(r io.Reader) *normaliser {
	return &normaliser{r: r}
}

func (norm *normaliser) Read(p []byte) (n int, err error) {
	n, err = norm.r.Read(p)
	for i := 0; i < n; i++ {
		switch {
		case p[i] == '\n' && norm.lastChar == '\r':
			copy(p[i:n], p[i+1:])
			norm.lastChar = p[i]
			n--
			i--
		case p[i] == '\r':
			norm.lastChar = p[i]
			p[i] = '\n'
		default:
			norm.lastChar = p[i]
		}
	}
	return
}
50  vendor/github.com/donovanhide/eventsource/repository.go  (generated, vendored, normal file)
@@ -0,0 +1,50 @@
package eventsource

import (
	"sort"
	"sync"
)

// Example repository that uses a slice as storage for past events.
type SliceRepository struct {
	events map[string][]Event
	lock   sync.RWMutex
}

func NewSliceRepository() *SliceRepository {
	return &SliceRepository{
		events: make(map[string][]Event),
	}
}

func (repo SliceRepository) indexOfEvent(channel, id string) int {
	return sort.Search(len(repo.events[channel]), func(i int) bool {
		return repo.events[channel][i].Id() >= id
	})
}

func (repo SliceRepository) Replay(channel, id string) (out chan Event) {
	out = make(chan Event)
	go func() {
		defer close(out)
		repo.lock.RLock()
		defer repo.lock.RUnlock()
		events := repo.events[channel][repo.indexOfEvent(channel, id):]
		for i := range events {
			out <- events[i]
		}
	}()
	return
}

func (repo *SliceRepository) Add(channel string, event Event) {
	repo.lock.Lock()
	defer repo.lock.Unlock()
	i := repo.indexOfEvent(channel, event.Id())
	if i < len(repo.events[channel]) && repo.events[channel][i].Id() == event.Id() {
		repo.events[channel][i] = event
	} else {
		repo.events[channel] = append(repo.events[channel][:i], append([]Event{event}, repo.events[channel][i:]...)...)
	}
	return
}
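Not part of the vendored file: a standalone sketch of SliceRepository, adding two events and replaying from a given id; the note type is a made-up Event implementation.

package main

import (
	"fmt"

	"github.com/donovanhide/eventsource"
)

type note struct{ id, data string }

func (n note) Id() string    { return n.id }
func (n note) Event() string { return "note" }
func (n note) Data() string  { return n.data }

func main() {
	repo := eventsource.NewSliceRepository()
	repo.Add("updates", note{id: "1", data: "first"})
	repo.Add("updates", note{id: "2", data: "second"})
	// Replay everything at or after id "2" on the "updates" channel.
	for ev := range repo.Replay("updates", "2") {
		fmt.Println(ev.Id(), ev.Data())
	}
}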
167  vendor/github.com/donovanhide/eventsource/server.go  (generated, vendored, normal file)
@@ -0,0 +1,167 @@
package eventsource

import (
	"log"
	"net/http"
	"strings"
)

type subscription struct {
	channel     string
	lastEventId string
	out         chan Event
}

type outbound struct {
	channels []string
	event    Event
}
type registration struct {
	channel    string
	repository Repository
}

type Server struct {
	AllowCORS     bool        // Enable all handlers to be accessible from any origin
	ReplayAll     bool        // Replay repository even if there's no Last-Event-Id specified
	BufferSize    int         // How many messages do we let the client get behind before disconnecting
	Gzip          bool        // Enable compression if client can accept it
	Logger        *log.Logger // Logger is a logger that, when set, will be used for logging debug messages
	registrations chan *registration
	pub           chan *outbound
	subs          chan *subscription
	unregister    chan *subscription
	quit          chan bool
}

// Create a new Server ready for handler creation and publishing events
func NewServer() *Server {
	srv := &Server{
		registrations: make(chan *registration),
		pub:           make(chan *outbound),
		subs:          make(chan *subscription),
		unregister:    make(chan *subscription, 2),
		quit:          make(chan bool),
		BufferSize:    128,
	}
	go srv.run()
	return srv
}

// Stop handling publishing
func (srv *Server) Close() {
	srv.quit <- true
}

// Create a new handler for serving a specified channel
func (srv *Server) Handler(channel string) http.HandlerFunc {
	return func(w http.ResponseWriter, req *http.Request) {
		h := w.Header()
		h.Set("Content-Type", "text/event-stream; charset=utf-8")
		h.Set("Cache-Control", "no-cache, no-store, must-revalidate")
		h.Set("Connection", "keep-alive")
		if srv.AllowCORS {
			h.Set("Access-Control-Allow-Origin", "*")
		}
		useGzip := srv.Gzip && strings.Contains(req.Header.Get("Accept-Encoding"), "gzip")
		if useGzip {
			h.Set("Content-Encoding", "gzip")
		}
		w.WriteHeader(http.StatusOK)

		sub := &subscription{
			channel:     channel,
			lastEventId: req.Header.Get("Last-Event-ID"),
			out:         make(chan Event, srv.BufferSize),
		}
		srv.subs <- sub
		flusher := w.(http.Flusher)
		notifier := w.(http.CloseNotifier)
		flusher.Flush()
		enc := NewEncoder(w, useGzip)
		for {
			select {
			case <-notifier.CloseNotify():
				srv.unregister <- sub
				return
			case ev, ok := <-sub.out:
				if !ok {
					return
				}
				if err := enc.Encode(ev); err != nil {
					srv.unregister <- sub
					if srv.Logger != nil {
						srv.Logger.Println(err)
					}
					return
				}
				flusher.Flush()
			}
		}
	}
}

// Register the repository to be used for the specified channel
func (srv *Server) Register(channel string, repo Repository) {
	srv.registrations <- &registration{
		channel:    channel,
		repository: repo,
	}
}

// Publish an event with the specified id to one or more channels
func (srv *Server) Publish(channels []string, ev Event) {
	srv.pub <- &outbound{
		channels: channels,
		event:    ev,
	}
}

func replay(repo Repository, sub *subscription) {
	for ev := range repo.Replay(sub.channel, sub.lastEventId) {
		sub.out <- ev
	}
}

func (srv *Server) run() {
	subs := make(map[string]map[*subscription]struct{})
	repos := make(map[string]Repository)
	for {
		select {
		case reg := <-srv.registrations:
			repos[reg.channel] = reg.repository
		case sub := <-srv.unregister:
			delete(subs[sub.channel], sub)
		case pub := <-srv.pub:
			for _, c := range pub.channels {
				for s := range subs[c] {
					select {
					case s.out <- pub.event:
					default:
						srv.unregister <- s
						close(s.out)
					}
				}
			}
		case sub := <-srv.subs:
			if _, ok := subs[sub.channel]; !ok {
				subs[sub.channel] = make(map[*subscription]struct{})
			}
			subs[sub.channel][sub] = struct{}{}
			if srv.ReplayAll || len(sub.lastEventId) > 0 {
				repo, ok := repos[sub.channel]
				if ok {
					go replay(repo, sub)
				}
			}
		case <-srv.quit:
			for _, sub := range subs {
				for s := range sub {
					close(s.out)
				}
			}
			return
		}
	}
}
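Not part of the vendored file: a sketch wiring Server, SliceRepository and Publish together; the tick type, channel name, route and listen address are illustrative choices.

package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"github.com/donovanhide/eventsource"
)

type tick struct{ id, data string }

func (t tick) Id() string    { return t.id }
func (t tick) Event() string { return "tick" }
func (t tick) Data() string  { return t.data }

func main() {
	srv := eventsource.NewServer()
	defer srv.Close()

	// Optional: keep history so reconnecting clients can catch up via Last-Event-ID.
	repo := eventsource.NewSliceRepository()
	srv.Register("time", repo)

	http.HandleFunc("/events", srv.Handler("time"))
	go func() {
		for i := 1; ; i++ {
			ev := tick{id: fmt.Sprint(i), data: time.Now().Format(time.RFC3339)}
			repo.Add("time", ev)
			srv.Publish([]string{"time"}, ev)
			time.Sleep(time.Second)
		}
	}()
	log.Fatal(http.ListenAndServe("localhost:8080", nil))
}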
151  vendor/github.com/donovanhide/eventsource/stream.go  (generated, vendored, normal file)
@@ -0,0 +1,151 @@
package eventsource

import (
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"log"
	"net/http"
	"time"
)

// Stream handles a connection for receiving Server Sent Events.
// It will try and reconnect if the connection is lost, respecting both
// received retry delays and event id's.
type Stream struct {
	c           *http.Client
	req         *http.Request
	lastEventId string
	retry       time.Duration
	// Events emits the events received by the stream
	Events chan Event
	// Errors emits any errors encountered while reading events from the stream.
	// It's mainly for informative purposes - the client isn't required to take any
	// action when an error is encountered. The stream will always attempt to continue,
	// even if that involves reconnecting to the server.
	Errors chan error
	// Logger is a logger that, when set, will be used for logging debug messages
	Logger *log.Logger
}

type SubscriptionError struct {
	Code    int
	Message string
}

func (e SubscriptionError) Error() string {
	return fmt.Sprintf("%d: %s", e.Code, e.Message)
}

// Subscribe to the Events emitted from the specified url.
// If lastEventId is non-empty it will be sent to the server in case it can replay missed events.
func Subscribe(url, lastEventId string) (*Stream, error) {
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return nil, err
	}
	return SubscribeWithRequest(lastEventId, req)
}

// SubscribeWithRequest will take an http.Request to setup the stream, allowing custom headers
// to be specified, authentication to be configured, etc.
func SubscribeWithRequest(lastEventId string, request *http.Request) (*Stream, error) {
	return SubscribeWith(lastEventId, http.DefaultClient, request)
}

// SubscribeWith takes an http client and request providing customization over both headers and
// control over the http client settings (timeouts, tls, etc)
func SubscribeWith(lastEventId string, client *http.Client, request *http.Request) (*Stream, error) {
	stream := &Stream{
		c:           client,
		req:         request,
		lastEventId: lastEventId,
		retry:       (time.Millisecond * 3000),
		Events:      make(chan Event),
		Errors:      make(chan error),
	}
	stream.c.CheckRedirect = checkRedirect

	r, err := stream.connect()
	if err != nil {
		return nil, err
	}
	go stream.stream(r)
	return stream, nil
}

// Go's http package doesn't copy headers across when it encounters
// redirects so we need to do that manually.
func checkRedirect(req *http.Request, via []*http.Request) error {
	if len(via) >= 10 {
		return errors.New("stopped after 10 redirects")
	}
	for k, vv := range via[0].Header {
		for _, v := range vv {
			req.Header.Add(k, v)
		}
	}
	return nil
}

func (stream *Stream) connect() (r io.ReadCloser, err error) {
	var resp *http.Response
	stream.req.Header.Set("Cache-Control", "no-cache")
	stream.req.Header.Set("Accept", "text/event-stream")
	if len(stream.lastEventId) > 0 {
		stream.req.Header.Set("Last-Event-ID", stream.lastEventId)
	}
	if resp, err = stream.c.Do(stream.req); err != nil {
		return
	}
	if resp.StatusCode != 200 {
		message, _ := ioutil.ReadAll(resp.Body)
		err = SubscriptionError{
			Code:    resp.StatusCode,
			Message: string(message),
		}
	}
	r = resp.Body
	return
}

func (stream *Stream) stream(r io.ReadCloser) {
	defer r.Close()
	dec := NewDecoder(r)
	for {
		ev, err := dec.Decode()

		if err != nil {
			stream.Errors <- err
			// respond to all errors by reconnecting and trying again
			break
		}
		pub := ev.(*publication)
		if pub.Retry() > 0 {
			stream.retry = time.Duration(pub.Retry()) * time.Millisecond
		}
		if len(pub.Id()) > 0 {
			stream.lastEventId = pub.Id()
		}
		stream.Events <- ev
	}
	backoff := stream.retry
	for {
		time.Sleep(backoff)
		if stream.Logger != nil {
			stream.Logger.Printf("Reconnecting in %0.4f secs\n", backoff.Seconds())
		}

		// NOTE: because of the defer we're opening the new connection
		// before closing the old one. Shouldn't be a problem in practice,
		// but something to be aware of.
		next, err := stream.connect()
		if err == nil {
			go stream.stream(next)
			break
		}
		stream.Errors <- err
		backoff *= 2
	}
}
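Not part of the vendored file: a client-side sketch using Subscribe; the endpoint URL is a placeholder and the loop simply prints whatever arrives.

package main

import (
	"fmt"
	"log"

	"github.com/donovanhide/eventsource"
)

func main() {
	// Point this at any text/event-stream endpoint; an empty lastEventId
	// means no replay is requested on the first connection.
	stream, err := eventsource.Subscribe("http://localhost:8080/events", "")
	if err != nil {
		log.Fatal(err)
	}
	for {
		select {
		case ev := <-stream.Events:
			fmt.Printf("id=%s event=%s data=%s\n", ev.Id(), ev.Event(), ev.Data())
		case err := <-stream.Errors:
			// Informational only; the stream reconnects on its own.
			log.Println("stream error:", err)
		}
	}
}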