swarm: integrate OpenTracing; propagate ctx to internal APIs (#17169)
* swarm: propagate ctx, enable opentracing
* swarm/tracing: log error when tracing is misconfigured
committed by Balint Gabor

parent f7d3678c28
commit 7c9314f231
			| @@ -43,6 +43,7 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/swarm" | ||||
| 	bzzapi "github.com/ethereum/go-ethereum/swarm/api" | ||||
| 	swarmmetrics "github.com/ethereum/go-ethereum/swarm/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/tracing" | ||||
|  | ||||
| 	"gopkg.in/urfave/cli.v1" | ||||
| ) | ||||
| @@ -430,12 +431,14 @@ pv(1) tool to get a progress bar: | ||||
| 	app.Flags = append(app.Flags, rpcFlags...) | ||||
| 	app.Flags = append(app.Flags, debug.Flags...) | ||||
| 	app.Flags = append(app.Flags, swarmmetrics.Flags...) | ||||
| 	app.Flags = append(app.Flags, tracing.Flags...) | ||||
| 	app.Before = func(ctx *cli.Context) error { | ||||
| 		runtime.GOMAXPROCS(runtime.NumCPU()) | ||||
| 		if err := debug.Setup(ctx, ""); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		swarmmetrics.Setup(ctx) | ||||
| 		tracing.Setup(ctx) | ||||
| 		return nil | ||||
| 	} | ||||
| 	app.After = func(ctx *cli.Context) error { | ||||
|   | ||||
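The wiring above only registers the tracing flags and calls `tracing.Setup` from `app.Before`; the tracing package itself is not part of this diff (and the real `Setup` takes a `*cli.Context`). The following is a purely hypothetical sketch of what such a setup routine might do, tying in the "log error when tracing is misconfigured" part of the commit message — the `endpoint` parameter and `newTracer` helper are assumptions, not the actual swarm/tracing code:

```go
// Hypothetical sketch, not the real swarm/tracing package.
package tracing

import (
	"errors"

	"github.com/ethereum/go-ethereum/log"
	opentracing "github.com/opentracing/opentracing-go"
)

// Enabled mirrors the flag that protocols.Peer.Send checks before injecting span context.
var Enabled bool

// Setup installs a global tracer; on misconfiguration it logs the error and leaves tracing off.
func Setup(endpoint string) {
	tracer, err := newTracer(endpoint)
	if err != nil {
		log.Error("tracing misconfigured, falling back to noop tracer", "err", err)
		opentracing.SetGlobalTracer(opentracing.NoopTracer{})
		return
	}
	Enabled = true
	opentracing.SetGlobalTracer(tracer)
}

// newTracer is a stand-in for building a real tracer (e.g. Jaeger) from configuration.
func newTracer(endpoint string) (opentracing.Tracer, error) {
	if endpoint == "" {
		return nil, errors.New("no tracing endpoint configured")
	}
	return opentracing.NoopTracer{}, nil
}
```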
| @@ -77,8 +77,9 @@ func cliUploadAndSync(c *cli.Context) error { | ||||
| 	log.Info("uploaded successfully", "hash", hash, "digest", fmt.Sprintf("%x", fhash)) | ||||
|  | ||||
| 	if filesize < 10 { | ||||
| 		time.Sleep(15 * time.Second) | ||||
| 		time.Sleep(35 * time.Second) | ||||
| 	} else { | ||||
| 		time.Sleep(15 * time.Second) | ||||
| 		time.Sleep(2 * time.Duration(filesize) * time.Second) | ||||
| 	} | ||||
|  | ||||
| @@ -108,7 +109,7 @@ func cliUploadAndSync(c *cli.Context) error { | ||||
| // fetch gets the requested `hash` from the `endpoint` and compares it with the `original` file | ||||
| func fetch(hash string, endpoint string, original []byte, ruid string) error { | ||||
| 	log.Trace("sleeping", "ruid", ruid) | ||||
| 	time.Sleep(1 * time.Second) | ||||
| 	time.Sleep(5 * time.Second) | ||||
|  | ||||
| 	log.Trace("http get request", "ruid", ruid, "api", endpoint, "hash", hash) | ||||
| 	res, err := http.Get(endpoint + "/bzz:/" + hash + "/") | ||||
|   | ||||
| @@ -29,6 +29,8 @@ devp2p subprotocols by abstracting away code standardly shared by protocols. | ||||
| package protocols | ||||
|  | ||||
| import ( | ||||
| 	"bufio" | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| @@ -39,6 +41,10 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/p2p" | ||||
| 	"github.com/ethereum/go-ethereum/rlp" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/tracing" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| // error codes used by this protocol scheme | ||||
| @@ -109,6 +115,13 @@ func errorf(code int, format string, params ...interface{}) *Error { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WrappedMsg is used to propagate marshalled context alongside message payloads | ||||
| type WrappedMsg struct { | ||||
| 	Context []byte | ||||
| 	Size    uint32 | ||||
| 	Payload []byte | ||||
| } | ||||
|  | ||||
| // Spec is a protocol specification including its name and version as well as | ||||
| // the types of messages which are exchanged | ||||
| type Spec struct { | ||||
| @@ -201,7 +214,7 @@ func NewPeer(p *p2p.Peer, rw p2p.MsgReadWriter, spec *Spec) *Peer { | ||||
| // the handler argument is a function which is called for each message received | ||||
| // from the remote peer, a returned error causes the loop to exit | ||||
| // resulting in disconnection | ||||
| func (p *Peer) Run(handler func(msg interface{}) error) error { | ||||
| func (p *Peer) Run(handler func(ctx context.Context, msg interface{}) error) error { | ||||
| 	for { | ||||
| 		if err := p.handleIncoming(handler); err != nil { | ||||
| 			if err != io.EOF { | ||||
| @@ -225,14 +238,47 @@ func (p *Peer) Drop(err error) { | ||||
| // message off to the peer | ||||
| // this low level call will be wrapped by libraries providing routed or broadcast sends | ||||
| // but often just used to forward and push messages to directly connected peers | ||||
| func (p *Peer) Send(msg interface{}) error { | ||||
| func (p *Peer) Send(ctx context.Context, msg interface{}) error { | ||||
| 	defer metrics.GetOrRegisterResettingTimer("peer.send_t", nil).UpdateSince(time.Now()) | ||||
| 	metrics.GetOrRegisterCounter("peer.send", nil).Inc(1) | ||||
|  | ||||
| 	var b bytes.Buffer | ||||
| 	if tracing.Enabled { | ||||
| 		writer := bufio.NewWriter(&b) | ||||
|  | ||||
| 		tracer := opentracing.GlobalTracer() | ||||
|  | ||||
| 		sctx := spancontext.FromContext(ctx) | ||||
|  | ||||
| 		if sctx != nil { | ||||
| 			err := tracer.Inject( | ||||
| 				sctx, | ||||
| 				opentracing.Binary, | ||||
| 				writer) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		writer.Flush() | ||||
| 	} | ||||
|  | ||||
| 	r, err := rlp.EncodeToBytes(msg) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	wmsg := WrappedMsg{ | ||||
| 		Context: b.Bytes(), | ||||
| 		Size:    uint32(len(r)), | ||||
| 		Payload: r, | ||||
| 	} | ||||
|  | ||||
| 	code, found := p.spec.GetCode(msg) | ||||
| 	if !found { | ||||
| 		return errorf(ErrInvalidMsgType, "%v", code) | ||||
| 	} | ||||
| 	return p2p.Send(p.rw, code, msg) | ||||
| 	return p2p.Send(p.rw, code, wmsg) | ||||
| } | ||||
|  | ||||
| // handleIncoming(code) | ||||
| @@ -243,7 +289,7 @@ func (p *Peer) Send(msg interface{}) error { | ||||
| // * checks for out-of-range message codes, | ||||
| // * handles decoding with reflection, | ||||
| // * call handlers as callbacks | ||||
| func (p *Peer) handleIncoming(handle func(msg interface{}) error) error { | ||||
| func (p *Peer) handleIncoming(handle func(ctx context.Context, msg interface{}) error) error { | ||||
| 	msg, err := p.rw.ReadMsg() | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| @@ -255,11 +301,38 @@ func (p *Peer) handleIncoming(handle func(msg interface{}) error) error { | ||||
| 		return errorf(ErrMsgTooLong, "%v > %v", msg.Size, p.spec.MaxMsgSize) | ||||
| 	} | ||||
|  | ||||
| 	// unmarshal wrapped msg, which might contain context | ||||
| 	var wmsg WrappedMsg | ||||
| 	err = msg.Decode(&wmsg) | ||||
| 	if err != nil { | ||||
| 		log.Error(err.Error()) | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	ctx := context.Background() | ||||
|  | ||||
| 	// if tracing is enabled and the context coming within the request is | ||||
| 	// not empty, try to unmarshal it | ||||
| 	if tracing.Enabled && len(wmsg.Context) > 0 { | ||||
| 		var sctx opentracing.SpanContext | ||||
|  | ||||
| 		tracer := opentracing.GlobalTracer() | ||||
| 		sctx, err = tracer.Extract( | ||||
| 			opentracing.Binary, | ||||
| 			bytes.NewReader(wmsg.Context)) | ||||
| 		if err != nil { | ||||
| 			log.Error(err.Error()) | ||||
| 			return err | ||||
| 		} | ||||
|  | ||||
| 		ctx = spancontext.WithContext(ctx, sctx) | ||||
| 	} | ||||
|  | ||||
| 	val, ok := p.spec.NewMsg(msg.Code) | ||||
| 	if !ok { | ||||
| 		return errorf(ErrInvalidMsgCode, "%v", msg.Code) | ||||
| 	} | ||||
| 	if err := msg.Decode(val); err != nil { | ||||
| 	if err := rlp.DecodeBytes(wmsg.Payload, val); err != nil { | ||||
| 		return errorf(ErrDecode, "<= %v: %v", msg, err) | ||||
| 	} | ||||
|  | ||||
| @@ -268,7 +341,7 @@ func (p *Peer) handleIncoming(handle func(msg interface{}) error) error { | ||||
| 	// which the handler is supposed to cast to the appropriate type | ||||
| 	// it is entirely safe not to check the cast in the handler since the handler is | ||||
| 	// chosen based on the proper type in the first place | ||||
| 	if err := handle(val); err != nil { | ||||
| 	if err := handle(ctx, val); err != nil { | ||||
| 		return errorf(ErrHandler, "(msg code %v): %v", msg.Code, err) | ||||
| 	} | ||||
| 	return nil | ||||
| @@ -288,14 +361,14 @@ func (p *Peer) Handshake(ctx context.Context, hs interface{}, verify func(interf | ||||
| 		return nil, errorf(ErrHandshake, "unknown handshake message type: %T", hs) | ||||
| 	} | ||||
| 	errc := make(chan error, 2) | ||||
| 	handle := func(msg interface{}) error { | ||||
| 	handle := func(ctx context.Context, msg interface{}) error { | ||||
| 		rhs = msg | ||||
| 		if verify != nil { | ||||
| 			return verify(rhs) | ||||
| 		} | ||||
| 		return nil | ||||
| 	} | ||||
| 	send := func() { errc <- p.Send(hs) } | ||||
| 	send := func() { errc <- p.Send(ctx, hs) } | ||||
| 	receive := func() { errc <- p.handleIncoming(handle) } | ||||
|  | ||||
| 	go func() { | ||||
|   | ||||
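To see the whole WrappedMsg round trip in one place, here is a hedged, self-contained sketch of the pattern the two hunks above implement on the sending and receiving sides. `wrap` and `unwrap` are illustrative names, not functions from p2p/protocols; only `rlp`, `spancontext`, and the opentracing calls are taken from the diff itself:

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/swarm/spancontext"
	opentracing "github.com/opentracing/opentracing-go"
)

// WrappedMsg mirrors the struct added to p2p/protocols above.
type WrappedMsg struct {
	Context []byte
	Size    uint32
	Payload []byte
}

// wrap RLP-encodes msg and attaches the serialized span context from ctx, if any.
func wrap(ctx context.Context, msg interface{}) (*WrappedMsg, error) {
	var b bytes.Buffer
	if sctx := spancontext.FromContext(ctx); sctx != nil {
		if err := opentracing.GlobalTracer().Inject(sctx, opentracing.Binary, &b); err != nil {
			return nil, err
		}
	}
	payload, err := rlp.EncodeToBytes(msg)
	if err != nil {
		return nil, err
	}
	return &WrappedMsg{Context: b.Bytes(), Size: uint32(len(payload)), Payload: payload}, nil
}

// unwrap decodes the payload into val and restores the remote span context, if present.
func unwrap(wmsg *WrappedMsg, val interface{}) (context.Context, error) {
	ctx := context.Background()
	if len(wmsg.Context) > 0 {
		sctx, err := opentracing.GlobalTracer().Extract(opentracing.Binary, bytes.NewReader(wmsg.Context))
		if err != nil {
			return nil, err
		}
		ctx = spancontext.WithContext(ctx, sctx)
	}
	return ctx, rlp.DecodeBytes(wmsg.Payload, val)
}

func main() {
	wmsg, _ := wrap(context.Background(), "hello")
	var s string
	_, _ = unwrap(wmsg, &s)
	fmt.Println(s) // prints "hello"
}
```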
| @@ -104,7 +104,7 @@ func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) er | ||||
| 			return fmt.Errorf("handshake mismatch remote %v > local %v", rmhs.C, lhs.C) | ||||
| 		} | ||||
|  | ||||
| 		handle := func(msg interface{}) error { | ||||
| 		handle := func(ctx context.Context, msg interface{}) error { | ||||
| 			switch msg := msg.(type) { | ||||
|  | ||||
| 			case *protoHandshake: | ||||
| @@ -116,7 +116,7 @@ func newProtocol(pp *p2ptest.TestPeerPool) func(*p2p.Peer, p2p.MsgReadWriter) er | ||||
| 					return fmt.Errorf("handshake mismatch remote %v > local %v", rhs.C, lhs.C) | ||||
| 				} | ||||
| 				lhs.C += rhs.C | ||||
| 				return peer.Send(lhs) | ||||
| 				return peer.Send(ctx, lhs) | ||||
|  | ||||
| 			case *kill: | ||||
| 				// demonstrates use of peerPool, killing another peer connection as a response to a message | ||||
|   | ||||
| @@ -180,7 +180,8 @@ func (m *mockNode) Run(peer *p2p.Peer, rw p2p.MsgReadWriter) error { | ||||
| 	for { | ||||
| 		select { | ||||
| 		case trig := <-m.trigger: | ||||
| 			m.err <- p2p.Send(rw, trig.Code, trig.Msg) | ||||
| 			wmsg := Wrap(trig.Msg) | ||||
| 			m.err <- p2p.Send(rw, trig.Code, wmsg) | ||||
| 		case exps := <-m.expect: | ||||
| 			m.err <- expectMsgs(rw, exps) | ||||
| 		case <-m.stop: | ||||
| @@ -220,7 +221,7 @@ func expectMsgs(rw p2p.MsgReadWriter, exps []Expect) error { | ||||
| 		} | ||||
| 		var found bool | ||||
| 		for i, exp := range exps { | ||||
| 			if exp.Code == msg.Code && bytes.Equal(actualContent, mustEncodeMsg(exp.Msg)) { | ||||
| 			if exp.Code == msg.Code && bytes.Equal(actualContent, mustEncodeMsg(Wrap(exp.Msg))) { | ||||
| 				if matched[i] { | ||||
| 					return fmt.Errorf("message #%d received two times", i) | ||||
| 				} | ||||
| @@ -235,7 +236,7 @@ func expectMsgs(rw p2p.MsgReadWriter, exps []Expect) error { | ||||
| 				if matched[i] { | ||||
| 					continue | ||||
| 				} | ||||
| 				expected = append(expected, fmt.Sprintf("code %d payload %x", exp.Code, mustEncodeMsg(exp.Msg))) | ||||
| 				expected = append(expected, fmt.Sprintf("code %d payload %x", exp.Code, mustEncodeMsg(Wrap(exp.Msg)))) | ||||
| 			} | ||||
| 			return fmt.Errorf("unexpected message code %d payload %x, expected %s", msg.Code, actualContent, strings.Join(expected, " or ")) | ||||
| 		} | ||||
| @@ -267,3 +268,17 @@ func mustEncodeMsg(msg interface{}) []byte { | ||||
| 	} | ||||
| 	return contentEnc | ||||
| } | ||||
|  | ||||
| type WrappedMsg struct { | ||||
| 	Context []byte | ||||
| 	Size    uint32 | ||||
| 	Payload []byte | ||||
| } | ||||
|  | ||||
| func Wrap(msg interface{}) interface{} { | ||||
| 	data, _ := rlp.EncodeToBytes(msg) | ||||
| 	return &WrappedMsg{ | ||||
| 		Size:    uint32(len(data)), | ||||
| 		Payload: data, | ||||
| 	} | ||||
| } | ||||
|   | ||||
| @@ -37,8 +37,10 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/multihash" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage/mru" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| @@ -263,6 +265,12 @@ func (a *API) Resolve(ctx context.Context, uri *URI) (storage.Address, error) { | ||||
| 	apiResolveCount.Inc(1) | ||||
| 	log.Trace("resolving", "uri", uri.Addr) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"api.resolve") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	// if the URI is immutable, check if the address looks like a hash | ||||
| 	if uri.Immutable() { | ||||
| 		key := uri.Address() | ||||
| @@ -347,7 +355,7 @@ func (a *API) Get(ctx context.Context, manifestAddr storage.Address, path string | ||||
| 			log.Trace("resource type", "key", manifestAddr, "hash", entry.Hash) | ||||
| 			ctx, cancel := context.WithCancel(context.Background()) | ||||
| 			defer cancel() | ||||
| 			rsrc, err := a.resource.Load(storage.Address(common.FromHex(entry.Hash))) | ||||
| 			rsrc, err := a.resource.Load(ctx, storage.Address(common.FromHex(entry.Hash))) | ||||
| 			if err != nil { | ||||
| 				apiGetNotFound.Inc(1) | ||||
| 				status = http.StatusNotFound | ||||
| @@ -486,7 +494,7 @@ func (a *API) GetDirectoryTar(ctx context.Context, uri *URI) (io.ReadCloser, err | ||||
|  | ||||
| 			// retrieve the entry's key and size | ||||
| 			reader, _ := a.Retrieve(ctx, storage.Address(common.Hex2Bytes(entry.Hash))) | ||||
| 			size, err := reader.Size(nil) | ||||
| 			size, err := reader.Size(ctx, nil) | ||||
| 			if err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| @@ -883,7 +891,7 @@ func (a *API) BuildDirectoryTree(ctx context.Context, mhash string, nameresolver | ||||
| // ResourceLookup Looks up mutable resource updates at specific periods and versions | ||||
| func (a *API) ResourceLookup(ctx context.Context, addr storage.Address, period uint32, version uint32, maxLookup *mru.LookupParams) (string, []byte, error) { | ||||
| 	var err error | ||||
| 	rsrc, err := a.resource.Load(addr) | ||||
| 	rsrc, err := a.resource.Load(ctx, addr) | ||||
| 	if err != nil { | ||||
| 		return "", nil, err | ||||
| 	} | ||||
|   | ||||
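`spancontext.StartSpan`, `spancontext.FromContext`, and `spancontext.WithContext` are called throughout this diff, but the swarm/spancontext package body is not part of the excerpt. As a hypothetical sketch of helpers with those signatures (the `ctxKey` type and the exact parenting logic are assumptions):

```go
package spancontext

import (
	"context"

	opentracing "github.com/opentracing/opentracing-go"
)

type ctxKey struct{}

// WithContext stashes a remote SpanContext in ctx so a span started later can be its child.
func WithContext(ctx context.Context, sctx opentracing.SpanContext) context.Context {
	return context.WithValue(ctx, ctxKey{}, sctx)
}

// FromContext returns the stashed SpanContext, or nil if none is present.
func FromContext(ctx context.Context) opentracing.SpanContext {
	sctx, _ := ctx.Value(ctxKey{}).(opentracing.SpanContext)
	return sctx
}

// StartSpan starts a span named name, parented either to a local span already in ctx
// or to a remote SpanContext stored via WithContext, and returns the span together
// with a ctx that carries it.
func StartSpan(ctx context.Context, name string) (context.Context, opentracing.Span) {
	var opts []opentracing.StartSpanOption
	if parent := opentracing.SpanFromContext(ctx); parent != nil {
		opts = append(opts, opentracing.ChildOf(parent.Context()))
	} else if sctx := FromContext(ctx); sctx != nil {
		opts = append(opts, opentracing.ChildOf(sctx))
	}
	sp := opentracing.GlobalTracer().StartSpan(name, opts...)
	return opentracing.ContextWithSpan(ctx, sp), sp
}
```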
| @@ -90,7 +90,7 @@ func testGet(t *testing.T, api *API, bzzhash, path string) *testResponse { | ||||
| 		t.Fatalf("unexpected error: %v", err) | ||||
| 	} | ||||
| 	quitC := make(chan bool) | ||||
| 	size, err := reader.Size(quitC) | ||||
| 	size, err := reader.Size(context.TODO(), quitC) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("unexpected error: %v", err) | ||||
| 	} | ||||
|   | ||||
| @@ -277,7 +277,7 @@ func retrieveToFile(quitC chan bool, fileStore *storage.FileStore, addr storage. | ||||
| 	} | ||||
| 	reader, _ := fileStore.Retrieve(context.TODO(), addr) | ||||
| 	writer := bufio.NewWriter(f) | ||||
| 	size, err := reader.Size(quitC) | ||||
| 	size, err := reader.Size(context.TODO(), quitC) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|   | ||||
| @@ -42,8 +42,11 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/api" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage/mru" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
|  | ||||
| 	"github.com/pborman/uuid" | ||||
| 	"github.com/rs/cors" | ||||
| ) | ||||
| @@ -263,6 +266,13 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { | ||||
|  | ||||
| 	postRawCount.Inc(1) | ||||
|  | ||||
| 	ctx := r.Context() | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.post.raw") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	toEncrypt := false | ||||
| 	if r.uri.Addr == "encrypt" { | ||||
| 		toEncrypt = true | ||||
| @@ -286,7 +296,7 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	addr, _, err := s.api.Store(r.Context(), r.Body, r.ContentLength, toEncrypt) | ||||
| 	addr, _, err := s.api.Store(ctx, r.Body, r.ContentLength, toEncrypt) | ||||
| 	if err != nil { | ||||
| 		postRawFail.Inc(1) | ||||
| 		Respond(w, r, err.Error(), http.StatusInternalServerError) | ||||
| @@ -307,8 +317,15 @@ func (s *Server) HandlePostRaw(w http.ResponseWriter, r *Request) { | ||||
| // resulting manifest hash as a text/plain response | ||||
| func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { | ||||
| 	log.Debug("handle.post.files", "ruid", r.ruid) | ||||
|  | ||||
| 	postFilesCount.Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx := r.Context() | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.post.files") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	contentType, params, err := mime.ParseMediaType(r.Header.Get("Content-Type")) | ||||
| 	if err != nil { | ||||
| 		postFilesFail.Inc(1) | ||||
| @@ -323,7 +340,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { | ||||
|  | ||||
| 	var addr storage.Address | ||||
| 	if r.uri.Addr != "" && r.uri.Addr != "encrypt" { | ||||
| 		addr, err = s.api.Resolve(r.Context(), r.uri) | ||||
| 		addr, err = s.api.Resolve(ctx, r.uri) | ||||
| 		if err != nil { | ||||
| 			postFilesFail.Inc(1) | ||||
| 			Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusInternalServerError) | ||||
| @@ -331,7 +348,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { | ||||
| 		} | ||||
| 		log.Debug("resolved key", "ruid", r.ruid, "key", addr) | ||||
| 	} else { | ||||
| 		addr, err = s.api.NewManifest(r.Context(), toEncrypt) | ||||
| 		addr, err = s.api.NewManifest(ctx, toEncrypt) | ||||
| 		if err != nil { | ||||
| 			postFilesFail.Inc(1) | ||||
| 			Respond(w, r, err.Error(), http.StatusInternalServerError) | ||||
| @@ -340,7 +357,7 @@ func (s *Server) HandlePostFiles(w http.ResponseWriter, r *Request) { | ||||
| 		log.Debug("new manifest", "ruid", r.ruid, "key", addr) | ||||
| 	} | ||||
|  | ||||
| 	newAddr, err := s.api.UpdateManifest(r.Context(), addr, func(mw *api.ManifestWriter) error { | ||||
| 	newAddr, err := s.api.UpdateManifest(ctx, addr, func(mw *api.ManifestWriter) error { | ||||
| 		switch contentType { | ||||
|  | ||||
| 		case "application/x-tar": | ||||
| @@ -509,6 +526,14 @@ func resourcePostMode(path string) (isRaw bool, frequency uint64, err error) { | ||||
| // and name "foo.eth" will be created | ||||
| func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 	log.Debug("handle.post.resource", "ruid", r.ruid) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx := r.Context() | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.post.resource") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	var err error | ||||
| 	var addr storage.Address | ||||
| 	var name string | ||||
| @@ -525,7 +550,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 		name = r.uri.Addr | ||||
|  | ||||
| 		// the key is the content addressed root chunk holding mutable resource metadata information | ||||
| 		addr, err = s.api.ResourceCreate(r.Context(), name, frequency) | ||||
| 		addr, err = s.api.ResourceCreate(ctx, name, frequency) | ||||
| 		if err != nil { | ||||
| 			code, err2 := s.translateResourceError(w, r, "resource creation fail", err) | ||||
|  | ||||
| @@ -536,7 +561,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 		// we create a manifest so we can retrieve the resource with bzz:// later | ||||
| 		// this manifest has a special "resource type" manifest, and its hash is the key of the mutable resource | ||||
| 		// root chunk | ||||
| 		m, err := s.api.NewResourceManifest(r.Context(), addr.Hex()) | ||||
| 		m, err := s.api.NewResourceManifest(ctx, addr.Hex()) | ||||
| 		if err != nil { | ||||
| 			Respond(w, r, fmt.Sprintf("failed to create resource manifest: %v", err), http.StatusInternalServerError) | ||||
| 			return | ||||
| @@ -556,7 +581,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 		// that means that we retrieve the manifest and inspect its Hash member. | ||||
| 		manifestAddr := r.uri.Address() | ||||
| 		if manifestAddr == nil { | ||||
| 			manifestAddr, err = s.api.Resolve(r.Context(), r.uri) | ||||
| 			manifestAddr, err = s.api.Resolve(ctx, r.uri) | ||||
| 			if err != nil { | ||||
| 				getFail.Inc(1) | ||||
| 				Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) | ||||
| @@ -567,7 +592,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 		} | ||||
|  | ||||
| 		// get the root chunk key from the manifest | ||||
| 		addr, err = s.api.ResolveResourceManifest(r.Context(), manifestAddr) | ||||
| 		addr, err = s.api.ResolveResourceManifest(ctx, manifestAddr) | ||||
| 		if err != nil { | ||||
| 			getFail.Inc(1) | ||||
| 			Respond(w, r, fmt.Sprintf("error resolving resource root chunk for %s: %s", r.uri.Addr, err), http.StatusNotFound) | ||||
| @@ -576,7 +601,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
|  | ||||
| 		log.Debug("handle.post.resource: resolved", "ruid", r.ruid, "manifestkey", manifestAddr, "rootchunkkey", addr) | ||||
|  | ||||
| 		name, _, err = s.api.ResourceLookup(r.Context(), addr, 0, 0, &mru.LookupParams{}) | ||||
| 		name, _, err = s.api.ResourceLookup(ctx, addr, 0, 0, &mru.LookupParams{}) | ||||
| 		if err != nil { | ||||
| 			Respond(w, r, err.Error(), http.StatusNotFound) | ||||
| 			return | ||||
| @@ -592,7 +617,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
|  | ||||
| 	// Multihash will be passed as hex-encoded data, so we need to parse this to bytes | ||||
| 	if isRaw { | ||||
| 		_, _, _, err = s.api.ResourceUpdate(r.Context(), name, data) | ||||
| 		_, _, _, err = s.api.ResourceUpdate(ctx, name, data) | ||||
| 		if err != nil { | ||||
| 			Respond(w, r, err.Error(), http.StatusBadRequest) | ||||
| 			return | ||||
| @@ -603,7 +628,7 @@ func (s *Server) HandlePostResource(w http.ResponseWriter, r *Request) { | ||||
| 			Respond(w, r, err.Error(), http.StatusBadRequest) | ||||
| 			return | ||||
| 		} | ||||
| 		_, _, _, err = s.api.ResourceUpdateMultihash(r.Context(), name, bytesdata) | ||||
| 		_, _, _, err = s.api.ResourceUpdateMultihash(ctx, name, bytesdata) | ||||
| 		if err != nil { | ||||
| 			Respond(w, r, err.Error(), http.StatusBadRequest) | ||||
| 			return | ||||
| @@ -730,10 +755,18 @@ func (s *Server) translateResourceError(w http.ResponseWriter, r *Request, supEr | ||||
| func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { | ||||
| 	log.Debug("handle.get", "ruid", r.ruid, "uri", r.uri) | ||||
| 	getCount.Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx := r.Context() | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.get") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	var err error | ||||
| 	addr := r.uri.Address() | ||||
| 	if addr == nil { | ||||
| 		addr, err = s.api.Resolve(r.Context(), r.uri) | ||||
| 		addr, err = s.api.Resolve(ctx, r.uri) | ||||
| 		if err != nil { | ||||
| 			getFail.Inc(1) | ||||
| 			Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) | ||||
| @@ -748,7 +781,7 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { | ||||
| 	// if path is set, interpret <key> as a manifest and return the | ||||
| 	// raw entry at the given path | ||||
| 	if r.uri.Path != "" { | ||||
| 		walker, err := s.api.NewManifestWalker(r.Context(), addr, nil) | ||||
| 		walker, err := s.api.NewManifestWalker(ctx, addr, nil) | ||||
| 		if err != nil { | ||||
| 			getFail.Inc(1) | ||||
| 			Respond(w, r, fmt.Sprintf("%s is not a manifest", addr), http.StatusBadRequest) | ||||
| @@ -796,8 +829,8 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { | ||||
| 	} | ||||
|  | ||||
| 	// check the root chunk exists by retrieving the file's size | ||||
| 	reader, isEncrypted := s.api.Retrieve(r.Context(), addr) | ||||
| 	if _, err := reader.Size(nil); err != nil { | ||||
| 	reader, isEncrypted := s.api.Retrieve(ctx, addr) | ||||
| 	if _, err := reader.Size(ctx, nil); err != nil { | ||||
| 		getFail.Inc(1) | ||||
| 		Respond(w, r, fmt.Sprintf("root chunk not found %s: %s", addr, err), http.StatusNotFound) | ||||
| 		return | ||||
| @@ -828,13 +861,21 @@ func (s *Server) HandleGet(w http.ResponseWriter, r *Request) { | ||||
| func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { | ||||
| 	log.Debug("handle.get.list", "ruid", r.ruid, "uri", r.uri) | ||||
| 	getListCount.Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx := r.Context() | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.get.list") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	// ensure the root path has a trailing slash so that relative URLs work | ||||
| 	if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { | ||||
| 		http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	addr, err := s.api.Resolve(r.Context(), r.uri) | ||||
| 	addr, err := s.api.Resolve(ctx, r.uri) | ||||
| 	if err != nil { | ||||
| 		getListFail.Inc(1) | ||||
| 		Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) | ||||
| @@ -842,7 +883,7 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { | ||||
| 	} | ||||
| 	log.Debug("handle.get.list: resolved", "ruid", r.ruid, "key", addr) | ||||
|  | ||||
| 	list, err := s.api.GetManifestList(r.Context(), addr, r.uri.Path) | ||||
| 	list, err := s.api.GetManifestList(ctx, addr, r.uri.Path) | ||||
| 	if err != nil { | ||||
| 		getListFail.Inc(1) | ||||
| 		Respond(w, r, err.Error(), http.StatusInternalServerError) | ||||
| @@ -877,19 +918,28 @@ func (s *Server) HandleGetList(w http.ResponseWriter, r *Request) { | ||||
| func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { | ||||
| 	log.Debug("handle.get.file", "ruid", r.ruid) | ||||
| 	getFileCount.Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx := r.Context() | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"http.get.file") | ||||
|  | ||||
| 	// ensure the root path has a trailing slash so that relative URLs work | ||||
| 	if r.uri.Path == "" && !strings.HasSuffix(r.URL.Path, "/") { | ||||
| 		http.Redirect(w, &r.Request, r.URL.Path+"/", http.StatusMovedPermanently) | ||||
| 		sp.Finish() | ||||
| 		return | ||||
| 	} | ||||
| 	var err error | ||||
| 	manifestAddr := r.uri.Address() | ||||
|  | ||||
| 	if manifestAddr == nil { | ||||
| 		manifestAddr, err = s.api.Resolve(r.Context(), r.uri) | ||||
| 		manifestAddr, err = s.api.Resolve(ctx, r.uri) | ||||
| 		if err != nil { | ||||
| 			getFileFail.Inc(1) | ||||
| 			Respond(w, r, fmt.Sprintf("cannot resolve %s: %s", r.uri.Addr, err), http.StatusNotFound) | ||||
| 			sp.Finish() | ||||
| 			return | ||||
| 		} | ||||
| 	} else { | ||||
| @@ -897,7 +947,8 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { | ||||
| 	} | ||||
|  | ||||
| 	log.Debug("handle.get.file: resolved", "ruid", r.ruid, "key", manifestAddr) | ||||
| 	reader, contentType, status, contentKey, err := s.api.Get(r.Context(), manifestAddr, r.uri.Path) | ||||
|  | ||||
| 	reader, contentType, status, contentKey, err := s.api.Get(ctx, manifestAddr, r.uri.Path) | ||||
|  | ||||
| 	etag := common.Bytes2Hex(contentKey) | ||||
| 	noneMatchEtag := r.Header.Get("If-None-Match") | ||||
| @@ -905,6 +956,7 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { | ||||
| 	if noneMatchEtag != "" { | ||||
| 		if bytes.Equal(storage.Address(common.Hex2Bytes(noneMatchEtag)), contentKey) { | ||||
| 			Respond(w, r, "Not Modified", http.StatusNotModified) | ||||
| 			sp.Finish() | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| @@ -918,34 +970,49 @@ func (s *Server) HandleGetFile(w http.ResponseWriter, r *Request) { | ||||
| 			getFileFail.Inc(1) | ||||
| 			Respond(w, r, err.Error(), http.StatusInternalServerError) | ||||
| 		} | ||||
| 		sp.Finish() | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	//the request results in ambiguous files | ||||
| 	//e.g. /read with readme.md and readinglist.txt available in manifest | ||||
| 	if status == http.StatusMultipleChoices { | ||||
| 		list, err := s.api.GetManifestList(r.Context(), manifestAddr, r.uri.Path) | ||||
| 		list, err := s.api.GetManifestList(ctx, manifestAddr, r.uri.Path) | ||||
| 		if err != nil { | ||||
| 			getFileFail.Inc(1) | ||||
| 			Respond(w, r, err.Error(), http.StatusInternalServerError) | ||||
| 			sp.Finish() | ||||
| 			return | ||||
| 		} | ||||
|  | ||||
| 		log.Debug(fmt.Sprintf("Multiple choices! --> %v", list), "ruid", r.ruid) | ||||
| 		//show a nice page links to available entries | ||||
| 		ShowMultipleChoices(w, r, list) | ||||
| 		sp.Finish() | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	// check the root chunk exists by retrieving the file's size | ||||
| 	if _, err := reader.Size(nil); err != nil { | ||||
| 	if _, err := reader.Size(ctx, nil); err != nil { | ||||
| 		getFileNotFound.Inc(1) | ||||
| 		Respond(w, r, fmt.Sprintf("file not found %s: %s", r.uri, err), http.StatusNotFound) | ||||
| 		sp.Finish() | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	buf, err := ioutil.ReadAll(newBufferedReadSeeker(reader, getFileBufferSize)) | ||||
| 	if err != nil { | ||||
| 		getFileNotFound.Inc(1) | ||||
| 		Respond(w, r, fmt.Sprintf("file not found %s: %s", r.uri, err), http.StatusNotFound) | ||||
| 		sp.Finish() | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	log.Debug("got response in buffer", "len", len(buf), "ruid", r.ruid) | ||||
| 	sp.Finish() | ||||
|  | ||||
| 	w.Header().Set("Content-Type", contentType) | ||||
| 	http.ServeContent(w, &r.Request, "", time.Now(), newBufferedReadSeeker(reader, getFileBufferSize)) | ||||
| 	http.ServeContent(w, &r.Request, "", time.Now(), bytes.NewReader(buf)) | ||||
| } | ||||
|  | ||||
| // The size of buffer used for bufio.Reader on LazyChunkReader passed to | ||||
|   | ||||
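Each HTTP handler above repeats the same three lines to open a span from the request context. A hedged sketch of how that pattern could be factored into a wrapper; `instrumented` is an illustrative name, and the sketch uses plain `*http.Request` instead of the package's own `Request` type:

```go
package main

import (
	"context"
	"net/http"

	"github.com/ethereum/go-ethereum/swarm/spancontext"
)

// instrumented wraps a context-aware handler with a span, factoring out the lines that
// HandlePostRaw, HandlePostFiles, HandleGet, etc. repeat inline in the diff above.
func instrumented(name string, h func(ctx context.Context, w http.ResponseWriter, r *http.Request)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		ctx, sp := spancontext.StartSpan(r.Context(), name)
		defer sp.Finish()
		h(ctx, w, r)
	}
}

func main() {
	http.Handle("/bzz-raw:/", instrumented("http.post.raw", func(ctx context.Context, w http.ResponseWriter, r *http.Request) {
		// a real handler would call into the API with ctx so the span follows the request
		w.WriteHeader(http.StatusOK)
	}))
	_ = http.ListenAndServe(":8500", nil)
}
```

Note that `HandleGetFile` in the diff calls `sp.Finish()` explicitly on every return path rather than deferring it, so the span ends before the (potentially long-running) `http.ServeContent` call.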
| @@ -212,10 +212,10 @@ func loadManifest(ctx context.Context, fileStore *storage.FileStore, hash storag | ||||
| 	return readManifest(manifestReader, hash, fileStore, isEncrypted, quitC) | ||||
| } | ||||
|  | ||||
| func readManifest(manifestReader storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand | ||||
| func readManifest(mr storage.LazySectionReader, hash storage.Address, fileStore *storage.FileStore, isEncrypted bool, quitC chan bool) (trie *manifestTrie, err error) { // non-recursive, subtrees are downloaded on-demand | ||||
|  | ||||
| 	// TODO check size for oversized manifests | ||||
| 	size, err := manifestReader.Size(quitC) | ||||
| 	size, err := mr.Size(mr.Context(), quitC) | ||||
| 	if err != nil { // size == 0 | ||||
| 		// can't determine size means we don't have the root chunk | ||||
| 		log.Trace("manifest not found", "key", hash) | ||||
| @@ -228,7 +228,7 @@ func readManifest(manifestReader storage.LazySectionReader, hash storage.Address | ||||
| 		return | ||||
| 	} | ||||
| 	manifestData := make([]byte, size) | ||||
| 	read, err := manifestReader.Read(manifestData) | ||||
| 	read, err := mr.Read(manifestData) | ||||
| 	if int64(read) < size { | ||||
| 		log.Trace("manifest not found", "key", hash) | ||||
| 		if err == nil { | ||||
|   | ||||
| @@ -72,7 +72,7 @@ func (s *Storage) Get(ctx context.Context, bzzpath string) (*Response, error) { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	quitC := make(chan bool) | ||||
| 	expsize, err := reader.Size(quitC) | ||||
| 	expsize, err := reader.Size(ctx, quitC) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|   | ||||
| @@ -86,7 +86,7 @@ func (sf *SwarmFile) Attr(ctx context.Context, a *fuse.Attr) error { | ||||
| 	if sf.fileSize == -1 { | ||||
| 		reader, _ := sf.mountInfo.swarmApi.Retrieve(ctx, sf.addr) | ||||
| 		quitC := make(chan bool) | ||||
| 		size, err := reader.Size(quitC) | ||||
| 		size, err := reader.Size(ctx, quitC) | ||||
| 		if err != nil { | ||||
| 			log.Error("Couldnt get size of file %s : %v", sf.path, err) | ||||
| 			return err | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package network | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
|  | ||||
| @@ -48,7 +49,7 @@ func newDiscovery(p *BzzPeer, o Overlay) *discPeer { | ||||
| } | ||||
|  | ||||
| // HandleMsg is the message handler that delegates incoming messages | ||||
| func (d *discPeer) HandleMsg(msg interface{}) error { | ||||
| func (d *discPeer) HandleMsg(ctx context.Context, msg interface{}) error { | ||||
| 	switch msg := msg.(type) { | ||||
|  | ||||
| 	case *peersMsg: | ||||
| @@ -99,14 +100,14 @@ func (d *discPeer) NotifyPeer(a OverlayAddr, po uint8) { | ||||
| 	resp := &peersMsg{ | ||||
| 		Peers: []*BzzAddr{ToAddr(a)}, | ||||
| 	} | ||||
| 	go d.Send(resp) | ||||
| 	go d.Send(context.TODO(), resp) | ||||
| } | ||||
|  | ||||
| // NotifyDepth sends a subPeers Msg to the receiver notifying them about | ||||
| // a change in the depth of saturation | ||||
| func (d *discPeer) NotifyDepth(po uint8) { | ||||
| 	// log.Trace(fmt.Sprintf("%08x peer %08x notified of new depth %v", d.localAddr.Over()[:4], d.Address()[:4], po)) | ||||
| 	go d.Send(&subPeersMsg{Depth: po}) | ||||
| 	go d.Send(context.TODO(), &subPeersMsg{Depth: po}) | ||||
| } | ||||
|  | ||||
| /* | ||||
| @@ -178,7 +179,7 @@ func (d *discPeer) handleSubPeersMsg(msg *subPeersMsg) error { | ||||
| 		}) | ||||
| 		if len(peers) > 0 { | ||||
| 			// log.Debug(fmt.Sprintf("%08x: %v peers sent to %v", d.overlay.BaseAddr(), len(peers), d)) | ||||
| 			go d.Send(&peersMsg{Peers: peers}) | ||||
| 			go d.Send(context.TODO(), &peersMsg{Peers: peers}) | ||||
| 		} | ||||
| 	} | ||||
| 	d.sentPeers = true | ||||
|   | ||||
| @@ -82,9 +82,9 @@ type Peer interface { | ||||
| type Conn interface { | ||||
| 	ID() discover.NodeID                                                                  // the key that uniquely identifies the Node for the peerPool | ||||
| 	Handshake(context.Context, interface{}, func(interface{}) error) (interface{}, error) // can send messages | ||||
| 	Send(interface{}) error                                                               // can send messages | ||||
| 	Send(context.Context, interface{}) error                                              // can send messages | ||||
| 	Drop(error)                                                                           // disconnect this peer | ||||
| 	Run(func(interface{}) error) error                                                    // the run function to run a protocol | ||||
| 	Run(func(context.Context, interface{}) error) error                                   // the run function to run a protocol | ||||
| 	Off() OverlayAddr | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -126,7 +126,7 @@ func NewStreamerService(ctx *adapters.ServiceContext) (node.Service, error) { | ||||
| 	return testRegistry, nil | ||||
| } | ||||
|  | ||||
| func defaultRetrieveFunc(id discover.NodeID) func(chunk *storage.Chunk) error { | ||||
| func defaultRetrieveFunc(id discover.NodeID) func(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| @@ -217,14 +217,14 @@ func newRoundRobinStore(stores ...storage.ChunkStore) *roundRobinStore { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (rrs *roundRobinStore) Get(addr storage.Address) (*storage.Chunk, error) { | ||||
| func (rrs *roundRobinStore) Get(ctx context.Context, addr storage.Address) (*storage.Chunk, error) { | ||||
| 	return nil, errors.New("get not well defined on round robin store") | ||||
| } | ||||
|  | ||||
| func (rrs *roundRobinStore) Put(chunk *storage.Chunk) { | ||||
| func (rrs *roundRobinStore) Put(ctx context.Context, chunk *storage.Chunk) { | ||||
| 	i := atomic.AddUint32(&rrs.index, 1) | ||||
| 	idx := int(i) % len(rrs.stores) | ||||
| 	rrs.stores[idx].Put(chunk) | ||||
| 	rrs.stores[idx].Put(ctx, chunk) | ||||
| } | ||||
|  | ||||
| func (rrs *roundRobinStore) Close() { | ||||
| @@ -369,8 +369,8 @@ func newTestExternalClient(db *storage.DBAPI) *testExternalClient { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (c *testExternalClient) NeedData(hash []byte) func() { | ||||
| 	chunk, _ := c.db.GetOrCreateRequest(hash) | ||||
| func (c *testExternalClient) NeedData(ctx context.Context, hash []byte) func() { | ||||
| 	chunk, _ := c.db.GetOrCreateRequest(ctx, hash) | ||||
| 	if chunk.ReqC == nil { | ||||
| 		return nil | ||||
| 	} | ||||
| @@ -429,7 +429,7 @@ func (s *testExternalServer) SetNextBatch(from uint64, to uint64) ([]byte, uint6 | ||||
| 	return b, from, to, nil, nil | ||||
| } | ||||
|  | ||||
| func (s *testExternalServer) GetData([]byte) ([]byte, error) { | ||||
| func (s *testExternalServer) GetData(context.Context, []byte) ([]byte, error) { | ||||
| 	return make([]byte, 4096), nil | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package stream | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"time" | ||||
|  | ||||
| @@ -25,7 +26,9 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/p2p/discover" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/network" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -118,8 +121,8 @@ func (s *SwarmChunkServer) Close() { | ||||
| } | ||||
|  | ||||
| // GetData retrieves chunk data from db store | ||||
| func (s *SwarmChunkServer) GetData(key []byte) ([]byte, error) { | ||||
| 	chunk, err := s.db.Get(storage.Address(key)) | ||||
| func (s *SwarmChunkServer) GetData(ctx context.Context, key []byte) ([]byte, error) { | ||||
| 	chunk, err := s.db.Get(ctx, storage.Address(key)) | ||||
| 	if err == storage.ErrFetching { | ||||
| 		<-chunk.ReqC | ||||
| 	} else if err != nil { | ||||
| @@ -134,25 +137,37 @@ type RetrieveRequestMsg struct { | ||||
| 	SkipCheck bool | ||||
| } | ||||
|  | ||||
| func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) error { | ||||
| func (d *Delivery) handleRetrieveRequestMsg(ctx context.Context, sp *Peer, req *RetrieveRequestMsg) error { | ||||
| 	log.Trace("received request", "peer", sp.ID(), "hash", req.Addr) | ||||
| 	handleRetrieveRequestMsgCount.Inc(1) | ||||
|  | ||||
| 	var osp opentracing.Span | ||||
| 	ctx, osp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"retrieve.request") | ||||
| 	defer osp.Finish() | ||||
|  | ||||
| 	s, err := sp.getServer(NewStream(swarmChunkServerStreamName, "", false)) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	streamer := s.Server.(*SwarmChunkServer) | ||||
| 	chunk, created := d.db.GetOrCreateRequest(req.Addr) | ||||
| 	chunk, created := d.db.GetOrCreateRequest(ctx, req.Addr) | ||||
| 	if chunk.ReqC != nil { | ||||
| 		if created { | ||||
| 			if err := d.RequestFromPeers(chunk.Addr[:], true, sp.ID()); err != nil { | ||||
| 			if err := d.RequestFromPeers(ctx, chunk.Addr[:], true, sp.ID()); err != nil { | ||||
| 				log.Warn("unable to forward chunk request", "peer", sp.ID(), "key", chunk.Addr, "err", err) | ||||
| 				chunk.SetErrored(storage.ErrChunkForward) | ||||
| 				return nil | ||||
| 			} | ||||
| 		} | ||||
| 		go func() { | ||||
| 			var osp opentracing.Span | ||||
| 			ctx, osp = spancontext.StartSpan( | ||||
| 				ctx, | ||||
| 				"waiting.delivery") | ||||
| 			defer osp.Finish() | ||||
|  | ||||
| 			t := time.NewTimer(10 * time.Minute) | ||||
| 			defer t.Stop() | ||||
|  | ||||
| @@ -169,7 +184,7 @@ func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) e | ||||
| 			chunk.SetErrored(nil) | ||||
|  | ||||
| 			if req.SkipCheck { | ||||
| 				err := sp.Deliver(chunk, s.priority) | ||||
| 				err := sp.Deliver(ctx, chunk, s.priority) | ||||
| 				if err != nil { | ||||
| 					log.Warn("ERROR in handleRetrieveRequestMsg, DROPPING peer!", "err", err) | ||||
| 					sp.Drop(err) | ||||
| @@ -185,7 +200,7 @@ func (d *Delivery) handleRetrieveRequestMsg(sp *Peer, req *RetrieveRequestMsg) e | ||||
| 		if length := len(chunk.SData); length < 9 { | ||||
| 			log.Error("Chunk.SData to deliver is too short", "len(chunk.SData)", length, "address", chunk.Addr) | ||||
| 		} | ||||
| 		return sp.Deliver(chunk, s.priority) | ||||
| 		return sp.Deliver(ctx, chunk, s.priority) | ||||
| 	} | ||||
| 	streamer.deliveryC <- chunk.Addr[:] | ||||
| 	return nil | ||||
| @@ -197,7 +212,13 @@ type ChunkDeliveryMsg struct { | ||||
| 	peer  *Peer  // set in handleChunkDeliveryMsg | ||||
| } | ||||
|  | ||||
| func (d *Delivery) handleChunkDeliveryMsg(sp *Peer, req *ChunkDeliveryMsg) error { | ||||
| func (d *Delivery) handleChunkDeliveryMsg(ctx context.Context, sp *Peer, req *ChunkDeliveryMsg) error { | ||||
| 	var osp opentracing.Span | ||||
| 	ctx, osp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"chunk.delivery") | ||||
| 	defer osp.Finish() | ||||
|  | ||||
| 	req.peer = sp | ||||
| 	d.receiveC <- req | ||||
| 	return nil | ||||
| @@ -209,7 +230,7 @@ R: | ||||
| 		processReceivedChunksCount.Inc(1) | ||||
|  | ||||
| 		// this chunk should already be available locally | ||||
| 		chunk, err := d.db.Get(req.Addr) | ||||
| 		chunk, err := d.db.Get(context.TODO(), req.Addr) | ||||
| 		if err == nil { | ||||
| 			continue R | ||||
| 		} | ||||
| @@ -224,7 +245,7 @@ R: | ||||
| 		default: | ||||
| 		} | ||||
| 		chunk.SData = req.SData | ||||
| 		d.db.Put(chunk) | ||||
| 		d.db.Put(context.TODO(), chunk) | ||||
|  | ||||
| 		go func(req *ChunkDeliveryMsg) { | ||||
| 			err := chunk.WaitToStore() | ||||
| @@ -236,10 +257,11 @@ R: | ||||
| } | ||||
|  | ||||
| // RequestFromPeers sends a chunk retrieve request to peers | ||||
| func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { | ||||
| func (d *Delivery) RequestFromPeers(ctx context.Context, hash []byte, skipCheck bool, peersToSkip ...discover.NodeID) error { | ||||
| 	var success bool | ||||
| 	var err error | ||||
| 	requestFromPeersCount.Inc(1) | ||||
|  | ||||
| 	d.overlay.EachConn(hash, 255, func(p network.OverlayConn, po int, nn bool) bool { | ||||
| 		spId := p.(network.Peer).ID() | ||||
| 		for _, p := range peersToSkip { | ||||
| @@ -253,8 +275,7 @@ func (d *Delivery) RequestFromPeers(hash []byte, skipCheck bool, peersToSkip ... | ||||
| 			log.Warn("Delivery.RequestFromPeers: peer not found", "id", spId) | ||||
| 			return true | ||||
| 		} | ||||
| 		// TODO: skip light nodes that do not accept retrieve requests | ||||
| 		err = sp.SendPriority(&RetrieveRequestMsg{ | ||||
| 		err = sp.SendPriority(ctx, &RetrieveRequestMsg{ | ||||
| 			Addr:      hash, | ||||
| 			SkipCheck: skipCheck, | ||||
| 		}, Top) | ||||
|   | ||||
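The retrieve function handed to `storage.NewNetStore` (see the test hunks below) now takes a context, so a span opened for an HTTP request can follow a chunk all the way to `RequestFromPeers`. A simplified, hypothetical sketch of that flow — `Chunk` and `NetStore` here are stand-ins, not the real storage types:

```go
package main

import (
	"context"
	"fmt"
)

type Chunk struct{ Addr []byte }

// retrieveFunc is the hook NewNetStore receives; after this diff it takes a context,
// so the span started for the original request follows the chunk retrieval.
type retrieveFunc func(ctx context.Context, chunk *Chunk) error

type NetStore struct{ retrieve retrieveFunc }

func (n *NetStore) Get(ctx context.Context, addr []byte) (*Chunk, error) {
	chunk := &Chunk{Addr: addr}
	// local store miss -> ask peers, passing the same ctx along
	if err := n.retrieve(ctx, chunk); err != nil {
		return nil, err
	}
	return chunk, nil
}

func main() {
	ns := &NetStore{retrieve: func(ctx context.Context, chunk *Chunk) error {
		fmt.Printf("requesting %x from peers\n", chunk.Addr)
		return nil // a real implementation would call Delivery.RequestFromPeers(ctx, ...)
	}}
	_, _ = ns.Get(context.Background(), []byte{0xde, 0xad})
}
```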
| @@ -46,7 +46,7 @@ func TestStreamerRetrieveRequest(t *testing.T) { | ||||
|  | ||||
| 	peerID := tester.IDs[0] | ||||
|  | ||||
| 	streamer.delivery.RequestFromPeers(hash0[:], true) | ||||
| 	streamer.delivery.RequestFromPeers(context.TODO(), hash0[:], true) | ||||
|  | ||||
| 	err = tester.TestExchanges(p2ptest.Exchange{ | ||||
| 		Label: "RetrieveRequestMsg", | ||||
| @@ -80,7 +80,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchangeWithoutStore(t *testing.T) { | ||||
|  | ||||
| 	peer := streamer.getPeer(peerID) | ||||
|  | ||||
| 	peer.handleSubscribeMsg(&SubscribeMsg{ | ||||
| 	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{ | ||||
| 		Stream:   NewStream(swarmChunkServerStreamName, "", false), | ||||
| 		History:  nil, | ||||
| 		Priority: Top, | ||||
| @@ -131,7 +131,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { | ||||
|  | ||||
| 	stream := NewStream(swarmChunkServerStreamName, "", false) | ||||
|  | ||||
| 	peer.handleSubscribeMsg(&SubscribeMsg{ | ||||
| 	peer.handleSubscribeMsg(context.TODO(), &SubscribeMsg{ | ||||
| 		Stream:   stream, | ||||
| 		History:  nil, | ||||
| 		Priority: Top, | ||||
| @@ -140,7 +140,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { | ||||
| 	hash := storage.Address(hash0[:]) | ||||
| 	chunk := storage.NewChunk(hash, nil) | ||||
| 	chunk.SData = hash | ||||
| 	localStore.Put(chunk) | ||||
| 	localStore.Put(context.TODO(), chunk) | ||||
| 	chunk.WaitToStore() | ||||
|  | ||||
| 	err = tester.TestExchanges(p2ptest.Exchange{ | ||||
| @@ -179,7 +179,7 @@ func TestStreamerUpstreamRetrieveRequestMsgExchange(t *testing.T) { | ||||
| 	hash = storage.Address(hash1[:]) | ||||
| 	chunk = storage.NewChunk(hash, nil) | ||||
| 	chunk.SData = hash1[:] | ||||
| 	localStore.Put(chunk) | ||||
| 	localStore.Put(context.TODO(), chunk) | ||||
| 	chunk.WaitToStore() | ||||
|  | ||||
| 	err = tester.TestExchanges(p2ptest.Exchange{ | ||||
| @@ -234,7 +234,7 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { | ||||
|  | ||||
| 	chunkKey := hash0[:] | ||||
| 	chunkData := hash1[:] | ||||
| 	chunk, created := localStore.GetOrCreateRequest(chunkKey) | ||||
| 	chunk, created := localStore.GetOrCreateRequest(context.TODO(), chunkKey) | ||||
|  | ||||
| 	if !created { | ||||
| 		t.Fatal("chunk already exists") | ||||
| @@ -285,7 +285,7 @@ func TestStreamerDownstreamChunkDeliveryMsgExchange(t *testing.T) { | ||||
| 	case <-chunk.ReqC: | ||||
| 	} | ||||
|  | ||||
| 	storedChunk, err := localStore.Get(chunkKey) | ||||
| 	storedChunk, err := localStore.Get(context.TODO(), chunkKey) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("Expected no error, got %v", err) | ||||
| 	} | ||||
| @@ -401,8 +401,8 @@ func testDeliveryFromNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck | ||||
| 		} | ||||
| 		// create a retriever FileStore for the pivot node | ||||
| 		delivery := deliveries[sim.IDs[0]] | ||||
| 		retrieveFunc := func(chunk *storage.Chunk) error { | ||||
| 			return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) | ||||
| 		retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 			return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) | ||||
| 		} | ||||
| 		netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) | ||||
| 		fileStore := storage.NewFileStore(netStore, storage.NewFileStoreParams()) | ||||
| @@ -617,8 +617,8 @@ func benchmarkDeliveryFromNodes(b *testing.B, nodes, conns, chunkCount int, skip | ||||
| 	// create a retriever FileStore for the pivot node | ||||
| 	// by now deliveries are set for each node by the streamer service | ||||
| 	delivery := deliveries[sim.IDs[0]] | ||||
| 	retrieveFunc := func(chunk *storage.Chunk) error { | ||||
| 		return delivery.RequestFromPeers(chunk.Addr[:], skipCheck) | ||||
| 	retrieveFunc := func(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 		return delivery.RequestFromPeers(ctx, chunk.Addr[:], skipCheck) | ||||
| 	} | ||||
| 	netStore := storage.NewNetStore(sim.Stores[0].(*storage.LocalStore), retrieveFunc) | ||||
|  | ||||
| @@ -650,7 +650,7 @@ Loop: | ||||
| 		errs := make(chan error) | ||||
| 		for _, hash := range hashes { | ||||
| 			go func(h storage.Address) { | ||||
| 				_, err := netStore.Get(h) | ||||
| 				_, err := netStore.Get(ctx, h) | ||||
| 				log.Warn("test check netstore get", "hash", h, "err", err) | ||||
| 				errs <- err | ||||
| 			}(hash) | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package stream | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"sync" | ||||
| @@ -25,7 +26,9 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	bv "github.com/ethereum/go-ethereum/swarm/network/bitvector" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| // Stream defines a unique stream identifier. | ||||
| @@ -71,17 +74,17 @@ type RequestSubscriptionMsg struct { | ||||
| 	Priority uint8  // delivered on priority channel | ||||
| } | ||||
|  | ||||
| func (p *Peer) handleRequestSubscription(req *RequestSubscriptionMsg) (err error) { | ||||
| func (p *Peer) handleRequestSubscription(ctx context.Context, req *RequestSubscriptionMsg) (err error) { | ||||
| 	log.Debug(fmt.Sprintf("handleRequestSubscription: streamer %s to subscribe to %s with stream %s", p.streamer.addr.ID(), p.ID(), req.Stream)) | ||||
| 	return p.streamer.Subscribe(p.ID(), req.Stream, req.History, req.Priority) | ||||
| } | ||||
|  | ||||
| func (p *Peer) handleSubscribeMsg(req *SubscribeMsg) (err error) { | ||||
| func (p *Peer) handleSubscribeMsg(ctx context.Context, req *SubscribeMsg) (err error) { | ||||
| 	metrics.GetOrRegisterCounter("peer.handlesubscribemsg", nil).Inc(1) | ||||
|  | ||||
| 	defer func() { | ||||
| 		if err != nil { | ||||
| 			if e := p.Send(SubscribeErrorMsg{ | ||||
| 			if e := p.Send(context.TODO(), SubscribeErrorMsg{ | ||||
| 				Error: err.Error(), | ||||
| 			}); e != nil { | ||||
| 				log.Error("send stream subscribe error message", "err", err) | ||||
| @@ -181,9 +184,15 @@ func (m OfferedHashesMsg) String() string { | ||||
|  | ||||
| // handleOfferedHashesMsg protocol msg handler calls the incoming streamer interface | ||||
| // Filter method | ||||
| func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { | ||||
| func (p *Peer) handleOfferedHashesMsg(ctx context.Context, req *OfferedHashesMsg) error { | ||||
| 	metrics.GetOrRegisterCounter("peer.handleofferedhashes", nil).Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"handle.offered.hashes") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	c, _, err := p.getOrSetClient(req.Stream, req.From, req.To) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| @@ -197,7 +206,7 @@ func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { | ||||
| 	for i := 0; i < len(hashes); i += HashSize { | ||||
| 		hash := hashes[i : i+HashSize] | ||||
|  | ||||
| 		if wait := c.NeedData(hash); wait != nil { | ||||
| 		if wait := c.NeedData(ctx, hash); wait != nil { | ||||
| 			want.Set(i/HashSize, true) | ||||
| 			wg.Add(1) | ||||
| 			// create request and wait until the chunk data arrives and is stored | ||||
| @@ -260,7 +269,7 @@ func (p *Peer) handleOfferedHashesMsg(req *OfferedHashesMsg) error { | ||||
| 			return | ||||
| 		} | ||||
| 		log.Trace("sending want batch", "peer", p.ID(), "stream", msg.Stream, "from", msg.From, "to", msg.To) | ||||
| 		err := p.SendPriority(msg, c.priority) | ||||
| 		err := p.SendPriority(ctx, msg, c.priority) | ||||
| 		if err != nil { | ||||
| 			log.Warn("SendPriority err, so dropping peer", "err", err) | ||||
| 			p.Drop(err) | ||||
| @@ -285,7 +294,7 @@ func (m WantedHashesMsg) String() string { | ||||
| // handleWantedHashesMsg protocol msg handler | ||||
| // * sends the next batch of unsynced keys | ||||
| // * sends the actual data chunks as per WantedHashesMsg | ||||
| func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { | ||||
| func (p *Peer) handleWantedHashesMsg(ctx context.Context, req *WantedHashesMsg) error { | ||||
| 	metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg", nil).Inc(1) | ||||
|  | ||||
| 	log.Trace("received wanted batch", "peer", p.ID(), "stream", req.Stream, "from", req.From, "to", req.To) | ||||
| @@ -314,7 +323,7 @@ func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { | ||||
| 			metrics.GetOrRegisterCounter("peer.handlewantedhashesmsg.actualget", nil).Inc(1) | ||||
|  | ||||
| 			hash := hashes[i*HashSize : (i+1)*HashSize] | ||||
| 			data, err := s.GetData(hash) | ||||
| 			data, err := s.GetData(ctx, hash) | ||||
| 			if err != nil { | ||||
| 				return fmt.Errorf("handleWantedHashesMsg get data %x: %v", hash, err) | ||||
| 			} | ||||
| @@ -323,7 +332,7 @@ func (p *Peer) handleWantedHashesMsg(req *WantedHashesMsg) error { | ||||
| 			if length := len(chunk.SData); length < 9 { | ||||
| 				log.Error("Chunk.SData to sync is too short", "len(chunk.SData)", length, "address", chunk.Addr) | ||||
| 			} | ||||
| 			if err := p.Deliver(chunk, s.priority); err != nil { | ||||
| 			if err := p.Deliver(ctx, chunk, s.priority); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| @@ -363,7 +372,7 @@ func (m TakeoverProofMsg) String() string { | ||||
| 	return fmt.Sprintf("Stream: '%v' [%v-%v], Root: %x, Sig: %x", m.Stream, m.Start, m.End, m.Root, m.Sig) | ||||
| } | ||||
|  | ||||
| func (p *Peer) handleTakeoverProofMsg(req *TakeoverProofMsg) error { | ||||
| func (p *Peer) handleTakeoverProofMsg(ctx context.Context, req *TakeoverProofMsg) error { | ||||
| 	_, err := p.getServer(req.Stream) | ||||
| 	// store the strongest takeoverproof for the stream in streamer | ||||
| 	return err | ||||
|   | ||||
| @@ -27,8 +27,10 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	pq "github.com/ethereum/go-ethereum/swarm/network/priorityqueue" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/state" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| var sendTimeout = 30 * time.Second | ||||
| @@ -62,6 +64,11 @@ type Peer struct { | ||||
| 	quit         chan struct{} | ||||
| } | ||||
|  | ||||
| type WrappedPriorityMsg struct { | ||||
| 	Context context.Context | ||||
| 	Msg     interface{} | ||||
| } | ||||
|  | ||||
| // NewPeer is the constructor for Peer | ||||
| func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { | ||||
| 	p := &Peer{ | ||||
| @@ -74,7 +81,10 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { | ||||
| 		quit:         make(chan struct{}), | ||||
| 	} | ||||
| 	ctx, cancel := context.WithCancel(context.Background()) | ||||
| 	go p.pq.Run(ctx, func(i interface{}) { p.Send(i) }) | ||||
| 	go p.pq.Run(ctx, func(i interface{}) { | ||||
| 		wmsg := i.(WrappedPriorityMsg) | ||||
| 		p.Send(wmsg.Context, wmsg.Msg) | ||||
| 	}) | ||||
| 	go func() { | ||||
| 		<-p.quit | ||||
| 		cancel() | ||||
| @@ -83,25 +93,41 @@ func NewPeer(peer *protocols.Peer, streamer *Registry) *Peer { | ||||
| } | ||||
|  | ||||
| // Deliver sends a storeRequestMsg protocol message to the peer | ||||
| func (p *Peer) Deliver(chunk *storage.Chunk, priority uint8) error { | ||||
| func (p *Peer) Deliver(ctx context.Context, chunk *storage.Chunk, priority uint8) error { | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"send.chunk.delivery") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	msg := &ChunkDeliveryMsg{ | ||||
| 		Addr:  chunk.Addr, | ||||
| 		SData: chunk.SData, | ||||
| 	} | ||||
| 	return p.SendPriority(msg, priority) | ||||
| 	return p.SendPriority(ctx, msg, priority) | ||||
| } | ||||
|  | ||||
| // SendPriority sends message to the peer using the outgoing priority queue | ||||
| func (p *Peer) SendPriority(msg interface{}, priority uint8) error { | ||||
| func (p *Peer) SendPriority(ctx context.Context, msg interface{}, priority uint8) error { | ||||
| 	defer metrics.GetOrRegisterResettingTimer(fmt.Sprintf("peer.sendpriority_t.%d", priority), nil).UpdateSince(time.Now()) | ||||
| 	metrics.GetOrRegisterCounter(fmt.Sprintf("peer.sendpriority.%d", priority), nil).Inc(1) | ||||
| 	ctx, cancel := context.WithTimeout(context.Background(), sendTimeout) | ||||
| 	cctx, cancel := context.WithTimeout(context.Background(), sendTimeout) | ||||
| 	defer cancel() | ||||
| 	return p.pq.Push(ctx, msg, int(priority)) | ||||
| 	wmsg := WrappedPriorityMsg{ | ||||
| 		Context: ctx, | ||||
| 		Msg:     msg, | ||||
| 	} | ||||
| 	return p.pq.Push(cctx, wmsg, int(priority)) | ||||
| } | ||||
|  | ||||
| // SendOfferedHashes sends OfferedHashesMsg protocol msg | ||||
| func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp := spancontext.StartSpan( | ||||
| 		context.TODO(), | ||||
| 		"send.offered.hashes") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	hashes, from, to, proof, err := s.SetNextBatch(f, t) | ||||
| 	if err != nil { | ||||
| 		return err | ||||
| @@ -124,7 +150,7 @@ func (p *Peer) SendOfferedHashes(s *server, f, t uint64) error { | ||||
| 		Stream:        s.stream, | ||||
| 	} | ||||
| 	log.Trace("Swarm syncer offer batch", "peer", p.ID(), "stream", s.stream, "len", len(hashes), "from", from, "to", to) | ||||
| 	return p.SendPriority(msg, s.priority) | ||||
| 	return p.SendPriority(ctx, msg, s.priority) | ||||
| } | ||||
|  | ||||
| func (p *Peer) getServer(s Stream) (*server, error) { | ||||
|   | ||||
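Aside: the WrappedPriorityMsg type and the SendPriority change above let the caller's context (which may carry a tracing span) ride through the priority queue together with the message, while the Push itself is bounded by its own short-lived timeout context. A minimal standalone sketch of that pattern, with hypothetical names and a plain channel standing in for the priority queue:

    package example

    import (
    	"context"
    	"time"
    )

    // wrapped pairs a message with the context it was sent under, so the
    // consumer can continue the same trace when it eventually sends the message.
    type wrapped struct {
    	ctx context.Context
    	msg interface{}
    }

    // push enqueues msg together with its context; the enqueue operation is
    // bounded by a separate timeout, independent of the caller's context.
    func push(queue chan wrapped, ctx context.Context, msg interface{}) error {
    	pctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
    	defer cancel()
    	select {
    	case queue <- wrapped{ctx: ctx, msg: msg}:
    		return nil
    	case <-pctx.Done():
    		return pctx.Err()
    	}
    }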
| @@ -55,10 +55,10 @@ func initRetrievalTest() { | ||||
| 	//deliveries for each node | ||||
| 	deliveries = make(map[discover.NodeID]*Delivery) | ||||
| 	//global retrieve func | ||||
| 	getRetrieveFunc = func(id discover.NodeID) func(chunk *storage.Chunk) error { | ||||
| 		return func(chunk *storage.Chunk) error { | ||||
| 	getRetrieveFunc = func(id discover.NodeID) func(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 		return func(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 			skipCheck := true | ||||
| 			return deliveries[id].RequestFromPeers(chunk.Addr[:], skipCheck) | ||||
| 			return deliveries[id].RequestFromPeers(ctx, chunk.Addr[:], skipCheck) | ||||
| 		} | ||||
| 	} | ||||
| 	//registries, map of discover.NodeID to its streamer | ||||
| @@ -412,7 +412,7 @@ func runFileRetrievalTest(nodeCount int) error { | ||||
| 		for i, hash := range conf.hashes { | ||||
| 			reader, _ := fileStore.Retrieve(context.TODO(), hash) | ||||
| 			//check that we can read the file size and that it corresponds to the generated file size | ||||
| 			if s, err := reader.Size(nil); err != nil || s != int64(len(randomFiles[i])) { | ||||
| 			if s, err := reader.Size(context.TODO(), nil); err != nil || s != int64(len(randomFiles[i])) { | ||||
| 				allSuccess = false | ||||
| 				log.Warn("Retrieve error", "err", err, "hash", hash, "nodeId", id) | ||||
| 			} else { | ||||
| @@ -699,7 +699,7 @@ func runRetrievalTest(chunkCount int, nodeCount int) error { | ||||
| 		for _, chnk := range conf.hashes { | ||||
| 			reader, _ := fileStore.Retrieve(context.TODO(), chnk) | ||||
| 			//assuming that reading the Size of the chunk is enough to know we found it | ||||
| 			if s, err := reader.Size(nil); err != nil || s != chunkSize { | ||||
| 			if s, err := reader.Size(context.TODO(), nil); err != nil || s != chunkSize { | ||||
| 				allSuccess = false | ||||
| 				log.Warn("Retrieve error", "err", err, "chunk", chnk, "nodeId", id) | ||||
| 			} else { | ||||
|   | ||||
| @@ -437,7 +437,7 @@ func runSyncTest(chunkCount int, nodeCount int, live bool, history bool) error { | ||||
| 			} else { | ||||
| 				//use the actual localstore | ||||
| 				lstore := stores[id] | ||||
| 				_, err = lstore.Get(chunk) | ||||
| 				_, err = lstore.Get(context.TODO(), chunk) | ||||
| 			} | ||||
| 			if err != nil { | ||||
| 				log.Warn(fmt.Sprintf("Chunk %s NOT found for id %s", chunk, id)) | ||||
|   | ||||
| @@ -32,8 +32,10 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/swarm/network" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/network/stream/intervals" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/pot" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/state" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| @@ -235,7 +237,7 @@ func (r *Registry) RequestSubscription(peerId discover.NodeID, s Stream, h *Rang | ||||
| 		if e, ok := err.(*notFoundError); ok && e.t == "server" { | ||||
| 			// request subscription only if the server for this stream is not created | ||||
| 			log.Debug("RequestSubscription ", "peer", peerId, "stream", s, "history", h) | ||||
| 			return peer.Send(&RequestSubscriptionMsg{ | ||||
| 			return peer.Send(context.TODO(), &RequestSubscriptionMsg{ | ||||
| 				Stream:   s, | ||||
| 				History:  h, | ||||
| 				Priority: prio, | ||||
| @@ -285,7 +287,7 @@ func (r *Registry) Subscribe(peerId discover.NodeID, s Stream, h *Range, priorit | ||||
| 	} | ||||
| 	log.Debug("Subscribe ", "peer", peerId, "stream", s, "history", h) | ||||
|  | ||||
| 	return peer.SendPriority(msg, priority) | ||||
| 	return peer.SendPriority(context.TODO(), msg, priority) | ||||
| } | ||||
|  | ||||
| func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error { | ||||
| @@ -299,7 +301,7 @@ func (r *Registry) Unsubscribe(peerId discover.NodeID, s Stream) error { | ||||
| 	} | ||||
| 	log.Debug("Unsubscribe ", "peer", peerId, "stream", s) | ||||
|  | ||||
| 	if err := peer.Send(msg); err != nil { | ||||
| 	if err := peer.Send(context.TODO(), msg); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
| 	return peer.removeClient(s) | ||||
| @@ -320,11 +322,17 @@ func (r *Registry) Quit(peerId discover.NodeID, s Stream) error { | ||||
| 	} | ||||
| 	log.Debug("Quit ", "peer", peerId, "stream", s) | ||||
|  | ||||
| 	return peer.Send(msg) | ||||
| 	return peer.Send(context.TODO(), msg) | ||||
| } | ||||
|  | ||||
| func (r *Registry) Retrieve(chunk *storage.Chunk) error { | ||||
| 	return r.delivery.RequestFromPeers(chunk.Addr[:], r.skipCheck) | ||||
| func (r *Registry) Retrieve(ctx context.Context, chunk *storage.Chunk) error { | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"registry.retrieve") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	return r.delivery.RequestFromPeers(ctx, chunk.Addr[:], r.skipCheck) | ||||
| } | ||||
|  | ||||
| func (r *Registry) NodeInfo() interface{} { | ||||
| @@ -460,11 +468,11 @@ func (r *Registry) runProtocol(p *p2p.Peer, rw p2p.MsgReadWriter) error { | ||||
| } | ||||
|  | ||||
| // HandleMsg is the message handler that delegates incoming messages | ||||
| func (p *Peer) HandleMsg(msg interface{}) error { | ||||
| func (p *Peer) HandleMsg(ctx context.Context, msg interface{}) error { | ||||
| 	switch msg := msg.(type) { | ||||
|  | ||||
| 	case *SubscribeMsg: | ||||
| 		return p.handleSubscribeMsg(msg) | ||||
| 		return p.handleSubscribeMsg(ctx, msg) | ||||
|  | ||||
| 	case *SubscribeErrorMsg: | ||||
| 		return p.handleSubscribeErrorMsg(msg) | ||||
| @@ -473,22 +481,22 @@ func (p *Peer) HandleMsg(msg interface{}) error { | ||||
| 		return p.handleUnsubscribeMsg(msg) | ||||
|  | ||||
| 	case *OfferedHashesMsg: | ||||
| 		return p.handleOfferedHashesMsg(msg) | ||||
| 		return p.handleOfferedHashesMsg(ctx, msg) | ||||
|  | ||||
| 	case *TakeoverProofMsg: | ||||
| 		return p.handleTakeoverProofMsg(msg) | ||||
| 		return p.handleTakeoverProofMsg(ctx, msg) | ||||
|  | ||||
| 	case *WantedHashesMsg: | ||||
| 		return p.handleWantedHashesMsg(msg) | ||||
| 		return p.handleWantedHashesMsg(ctx, msg) | ||||
|  | ||||
| 	case *ChunkDeliveryMsg: | ||||
| 		return p.streamer.delivery.handleChunkDeliveryMsg(p, msg) | ||||
| 		return p.streamer.delivery.handleChunkDeliveryMsg(ctx, p, msg) | ||||
|  | ||||
| 	case *RetrieveRequestMsg: | ||||
| 		return p.streamer.delivery.handleRetrieveRequestMsg(p, msg) | ||||
| 		return p.streamer.delivery.handleRetrieveRequestMsg(ctx, p, msg) | ||||
|  | ||||
| 	case *RequestSubscriptionMsg: | ||||
| 		return p.handleRequestSubscription(msg) | ||||
| 		return p.handleRequestSubscription(ctx, msg) | ||||
|  | ||||
| 	case *QuitMsg: | ||||
| 		return p.handleQuitMsg(msg) | ||||
| @@ -508,7 +516,7 @@ type server struct { | ||||
| // Server interface for outgoing peer Streamer | ||||
| type Server interface { | ||||
| 	SetNextBatch(uint64, uint64) (hashes []byte, from uint64, to uint64, proof *HandoverProof, err error) | ||||
| 	GetData([]byte) ([]byte, error) | ||||
| 	GetData(context.Context, []byte) ([]byte, error) | ||||
| 	Close() | ||||
| } | ||||
|  | ||||
| @@ -551,7 +559,7 @@ func (c client) NextInterval() (start, end uint64, err error) { | ||||
|  | ||||
| // Client interface for incoming peer Streamer | ||||
| type Client interface { | ||||
| 	NeedData([]byte) func() | ||||
| 	NeedData(context.Context, []byte) func() | ||||
| 	BatchDone(Stream, uint64, []byte, []byte) func() (*TakeoverProof, error) | ||||
| 	Close() | ||||
| } | ||||
| @@ -588,7 +596,7 @@ func (c *client) batchDone(p *Peer, req *OfferedHashesMsg, hashes []byte) error | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		if err := p.SendPriority(tp, c.priority); err != nil { | ||||
| 		if err := p.SendPriority(context.TODO(), tp, c.priority); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		if c.to > 0 && tp.Takeover.End >= c.to { | ||||
|   | ||||
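Aside: the HandleMsg change above threads the per-message context into every handler, so a span opened when the message was received can be continued inside handler code. A minimal sketch of that dispatch shape, with illustrative message types rather than the real stream protocol messages:

    package example

    import (
    	"context"
    	"fmt"
    )

    type pingMsg struct{}
    type quitMsg struct{}

    // handle dispatches on the concrete message type and forwards the context,
    // mirroring the ctx-aware switch in Peer.HandleMsg.
    func handle(ctx context.Context, msg interface{}) error {
    	switch msg := msg.(type) {
    	case *pingMsg:
    		return handlePing(ctx, msg)
    	case *quitMsg:
    		return handleQuit(ctx, msg)
    	default:
    		return fmt.Errorf("unknown message type: %T", msg)
    	}
    }

    func handlePing(ctx context.Context, m *pingMsg) error { return nil }
    func handleQuit(ctx context.Context, m *quitMsg) error { return nil }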
| @@ -18,6 +18,7 @@ package stream | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"testing" | ||||
| 	"time" | ||||
|  | ||||
| @@ -79,7 +80,7 @@ func newTestClient(t string) *testClient { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (self *testClient) NeedData(hash []byte) func() { | ||||
| func (self *testClient) NeedData(ctx context.Context, hash []byte) func() { | ||||
| 	self.receivedHashes[string(hash)] = hash | ||||
| 	if bytes.Equal(hash, hash0[:]) { | ||||
| 		return func() { | ||||
| @@ -114,7 +115,7 @@ func (self *testServer) SetNextBatch(from uint64, to uint64) ([]byte, uint64, ui | ||||
| 	return make([]byte, HashSize), from + 1, to + 1, nil, nil | ||||
| } | ||||
|  | ||||
| func (self *testServer) GetData([]byte) ([]byte, error) { | ||||
| func (self *testServer) GetData(context.Context, []byte) ([]byte, error) { | ||||
| 	return nil, nil | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package stream | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"math" | ||||
| 	"strconv" | ||||
| 	"time" | ||||
| @@ -78,8 +79,8 @@ func (s *SwarmSyncerServer) Close() { | ||||
| } | ||||
|  | ||||
| // GetSection retrieves the actual chunk from localstore | ||||
| func (s *SwarmSyncerServer) GetData(key []byte) ([]byte, error) { | ||||
| 	chunk, err := s.db.Get(storage.Address(key)) | ||||
| func (s *SwarmSyncerServer) GetData(ctx context.Context, key []byte) ([]byte, error) { | ||||
| 	chunk, err := s.db.Get(ctx, storage.Address(key)) | ||||
| 	if err == storage.ErrFetching { | ||||
| 		<-chunk.ReqC | ||||
| 	} else if err != nil { | ||||
| @@ -210,8 +211,8 @@ func RegisterSwarmSyncerClient(streamer *Registry, db *storage.DBAPI) { | ||||
| } | ||||
|  | ||||
| // NeedData | ||||
| func (s *SwarmSyncerClient) NeedData(key []byte) (wait func()) { | ||||
| 	chunk, _ := s.db.GetOrCreateRequest(key) | ||||
| func (s *SwarmSyncerClient) NeedData(ctx context.Context, key []byte) (wait func()) { | ||||
| 	chunk, _ := s.db.GetOrCreateRequest(ctx, key) | ||||
| 	// TODO: we may want to request from this peer anyway even if the request exists | ||||
|  | ||||
| 	// ignoreExistingRequest is temporarily commented out until its functionality is verified. | ||||
|   | ||||
| @@ -231,7 +231,7 @@ func testSyncBetweenNodes(t *testing.T, nodes, conns, chunkCount int, skipCheck | ||||
| 		for j := i; j < nodes; j++ { | ||||
| 			total += len(hashes[j]) | ||||
| 			for _, key := range hashes[j] { | ||||
| 				chunk, err := dbs[i].Get(key) | ||||
| 				chunk, err := dbs[i].Get(ctx, key) | ||||
| 				if err == storage.ErrFetching { | ||||
| 					<-chunk.ReqC | ||||
| 				} else if err != nil { | ||||
|   | ||||
| @@ -19,6 +19,7 @@ | ||||
| package pss | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"errors" | ||||
| 	"time" | ||||
|  | ||||
| @@ -40,7 +41,7 @@ type Ping struct { | ||||
| 	InC  chan bool // optional, report back to calling code | ||||
| } | ||||
|  | ||||
| func (p *Ping) pingHandler(msg interface{}) error { | ||||
| func (p *Ping) pingHandler(ctx context.Context, msg interface{}) error { | ||||
| 	var pingmsg *PingMsg | ||||
| 	var ok bool | ||||
| 	if pingmsg, ok = msg.(*PingMsg); !ok { | ||||
| @@ -80,7 +81,7 @@ func NewPingProtocol(ping *Ping) *p2p.Protocol { | ||||
| 				for { | ||||
| 					select { | ||||
| 					case ispong := <-ping.OutC: | ||||
| 						pp.Send(&PingMsg{ | ||||
| 						pp.Send(context.TODO(), &PingMsg{ | ||||
| 							Created: time.Now(), | ||||
| 							Pong:    ispong, | ||||
| 						}) | ||||
|   | ||||
| @@ -18,6 +18,7 @@ package pss | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"crypto/ecdsa" | ||||
| 	"crypto/rand" | ||||
| 	"errors" | ||||
| @@ -71,7 +72,7 @@ type senderPeer interface { | ||||
| 	Info() *p2p.PeerInfo | ||||
| 	ID() discover.NodeID | ||||
| 	Address() []byte | ||||
| 	Send(interface{}) error | ||||
| 	Send(context.Context, interface{}) error | ||||
| } | ||||
|  | ||||
| // per-key peer related information | ||||
| @@ -344,7 +345,7 @@ func (p *Pss) getHandlers(topic Topic) map[*Handler]bool { | ||||
| // Check if address partially matches | ||||
| // If yes, it CAN be for us, and we process it | ||||
| // Only passes error to pss protocol handler if payload is not valid pssmsg | ||||
| func (p *Pss) handlePssMsg(msg interface{}) error { | ||||
| func (p *Pss) handlePssMsg(ctx context.Context, msg interface{}) error { | ||||
| 	metrics.GetOrRegisterCounter("pss.handlepssmsg", nil).Inc(1) | ||||
|  | ||||
| 	pssmsg, ok := msg.(*PssMsg) | ||||
| @@ -844,7 +845,7 @@ func (p *Pss) forward(msg *PssMsg) error { | ||||
| 		p.fwdPoolMu.RUnlock() | ||||
|  | ||||
| 		// attempt to send the message | ||||
| 		err := pp.Send(msg) | ||||
| 		err := pp.Send(context.TODO(), msg) | ||||
| 		if err != nil { | ||||
| 			metrics.GetOrRegisterCounter("pss.pp.send.error", nil).Inc(1) | ||||
| 			log.Error(err.Error()) | ||||
|   | ||||
| @@ -334,7 +334,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
| 			Data:  []byte{0x66, 0x6f, 0x6f}, | ||||
| 		}, | ||||
| 	} | ||||
| 	if err := ps.handlePssMsg(msg); err != nil { | ||||
| 	if err := ps.handlePssMsg(context.TODO(), msg); err != nil { | ||||
| 		t.Fatal(err.Error()) | ||||
| 	} | ||||
| 	tmr := time.NewTimer(time.Millisecond * 100) | ||||
| @@ -351,7 +351,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
| 	// message should pass and queue due to partial length | ||||
| 	msg.To = addr[0:1] | ||||
| 	msg.Payload.Data = []byte{0x78, 0x79, 0x80, 0x80, 0x79} | ||||
| 	if err := ps.handlePssMsg(msg); err != nil { | ||||
| 	if err := ps.handlePssMsg(context.TODO(), msg); err != nil { | ||||
| 		t.Fatal(err.Error()) | ||||
| 	} | ||||
| 	tmr.Reset(time.Millisecond * 100) | ||||
| @@ -374,7 +374,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
|  | ||||
| 	// full address mismatch should put message in queue | ||||
| 	msg.To[0] = 0xff | ||||
| 	if err := ps.handlePssMsg(msg); err != nil { | ||||
| 	if err := ps.handlePssMsg(context.TODO(), msg); err != nil { | ||||
| 		t.Fatal(err.Error()) | ||||
| 	} | ||||
| 	tmr.Reset(time.Millisecond * 10) | ||||
| @@ -397,7 +397,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
|  | ||||
| 	// expired message should be dropped | ||||
| 	msg.Expire = uint32(time.Now().Add(-time.Second).Unix()) | ||||
| 	if err := ps.handlePssMsg(msg); err != nil { | ||||
| 	if err := ps.handlePssMsg(context.TODO(), msg); err != nil { | ||||
| 		t.Fatal(err.Error()) | ||||
| 	} | ||||
| 	tmr.Reset(time.Millisecond * 10) | ||||
| @@ -417,7 +417,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
| 	}{ | ||||
| 		pssMsg: &PssMsg{}, | ||||
| 	} | ||||
| 	if err := ps.handlePssMsg(fckedupmsg); err == nil { | ||||
| 	if err := ps.handlePssMsg(context.TODO(), fckedupmsg); err == nil { | ||||
| 		t.Fatalf("expected error from processMsg but error nil") | ||||
| 	} | ||||
|  | ||||
| @@ -427,7 +427,7 @@ func TestHandlerConditions(t *testing.T) { | ||||
| 		ps.outbox <- msg | ||||
| 	} | ||||
| 	msg.Payload.Data = []byte{0x62, 0x61, 0x72} | ||||
| 	err = ps.handlePssMsg(msg) | ||||
| 	err = ps.handlePssMsg(context.TODO(), msg) | ||||
| 	if err == nil { | ||||
| 		t.Fatal("expected error when mailbox full, but was nil") | ||||
| 	} | ||||
|   | ||||
swarm/spancontext/spancontext.go (new file, 49 lines added)
							| @@ -0,0 +1,49 @@ | ||||
| package spancontext | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
|  | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| func WithContext(ctx context.Context, sctx opentracing.SpanContext) context.Context { | ||||
| 	return context.WithValue(ctx, "span_context", sctx) | ||||
| } | ||||
|  | ||||
| func FromContext(ctx context.Context) opentracing.SpanContext { | ||||
| 	sctx, ok := ctx.Value("span_context").(opentracing.SpanContext) | ||||
| 	if ok { | ||||
| 		return sctx | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func StartSpan(ctx context.Context, name string) (context.Context, opentracing.Span) { | ||||
| 	tracer := opentracing.GlobalTracer() | ||||
|  | ||||
| 	sctx := FromContext(ctx) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	if sctx != nil { | ||||
| 		sp = tracer.StartSpan( | ||||
| 			name, | ||||
| 			opentracing.ChildOf(sctx)) | ||||
| 	} else { | ||||
| 		sp = tracer.StartSpan(name) | ||||
| 	} | ||||
|  | ||||
| 	nctx := context.WithValue(ctx, "span_context", sp.Context()) | ||||
|  | ||||
| 	return nctx, sp | ||||
| } | ||||
|  | ||||
| func StartSpanFrom(name string, sctx opentracing.SpanContext) opentracing.Span { | ||||
| 	tracer := opentracing.GlobalTracer() | ||||
|  | ||||
| 	sp := tracer.StartSpan( | ||||
| 		name, | ||||
| 		opentracing.ChildOf(sctx)) | ||||
|  | ||||
| 	return sp | ||||
| } | ||||
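Aside: a minimal usage sketch for the helpers above (the surrounding function is hypothetical): any code path that already receives a context can open a child span and hand the enriched context to the next layer.

    package example

    import (
    	"context"

    	"github.com/ethereum/go-ethereum/swarm/spancontext"
    )

    // doTracedWork opens a span named "example.work"; if ctx already carries a
    // span context (placed there by StartSpan further up the call chain), the
    // new span is created as its child, otherwise it becomes a root span.
    func doTracedWork(ctx context.Context, work func(context.Context) error) error {
    	ctx, sp := spancontext.StartSpan(ctx, "example.work")
    	defer sp.Finish()
    	return work(ctx)
    }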
| @@ -26,6 +26,9 @@ import ( | ||||
|  | ||||
| 	"github.com/ethereum/go-ethereum/metrics" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| 	olog "github.com/opentracing/opentracing-go/log" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| @@ -93,9 +96,12 @@ type JoinerParams struct { | ||||
| 	getter Getter | ||||
| 	// TODO: there is a bug, so depth can only be 0 today, see: https://github.com/ethersphere/go-ethereum/issues/344 | ||||
| 	depth int | ||||
| 	ctx   context.Context | ||||
| } | ||||
|  | ||||
| type TreeChunker struct { | ||||
| 	ctx context.Context | ||||
|  | ||||
| 	branches int64 | ||||
| 	hashFunc SwarmHasher | ||||
| 	dataSize int64 | ||||
| @@ -136,6 +142,7 @@ func TreeJoin(ctx context.Context, addr Address, getter Getter, depth int) *Lazy | ||||
| 		addr:   addr, | ||||
| 		getter: getter, | ||||
| 		depth:  depth, | ||||
| 		ctx:    ctx, | ||||
| 	} | ||||
|  | ||||
| 	return NewTreeJoiner(jp).Join(ctx) | ||||
| @@ -174,6 +181,8 @@ func NewTreeJoiner(params *JoinerParams) *TreeChunker { | ||||
| 	tc.errC = make(chan error) | ||||
| 	tc.quitC = make(chan bool) | ||||
|  | ||||
| 	tc.ctx = params.ctx | ||||
|  | ||||
| 	return tc | ||||
| } | ||||
|  | ||||
| @@ -351,7 +360,7 @@ func (tc *TreeChunker) runWorker() { | ||||
| 					return | ||||
| 				} | ||||
|  | ||||
| 				h, err := tc.putter.Put(job.chunk) | ||||
| 				h, err := tc.putter.Put(tc.ctx, job.chunk) | ||||
| 				if err != nil { | ||||
| 					tc.errC <- err | ||||
| 					return | ||||
| @@ -371,6 +380,7 @@ func (tc *TreeChunker) Append() (Address, func(), error) { | ||||
|  | ||||
| // LazyChunkReader implements LazySectionReader | ||||
| type LazyChunkReader struct { | ||||
| 	Ctx       context.Context | ||||
| 	key       Address // root key | ||||
| 	chunkData ChunkData | ||||
| 	off       int64 // offset | ||||
| @@ -389,16 +399,28 @@ func (tc *TreeChunker) Join(ctx context.Context) *LazyChunkReader { | ||||
| 		hashSize:  tc.hashSize, | ||||
| 		depth:     tc.depth, | ||||
| 		getter:    tc.getter, | ||||
| 		Ctx:       tc.ctx, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (r *LazyChunkReader) Context() context.Context { | ||||
| 	return r.Ctx | ||||
| } | ||||
|  | ||||
| // Size is meant to be called on the LazySectionReader | ||||
| func (r *LazyChunkReader) Size(quitC chan bool) (n int64, err error) { | ||||
| func (r *LazyChunkReader) Size(ctx context.Context, quitC chan bool) (n int64, err error) { | ||||
| 	metrics.GetOrRegisterCounter("lazychunkreader.size", nil).Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	var cctx context.Context | ||||
| 	cctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"lcr.size") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	log.Debug("lazychunkreader.size", "key", r.key) | ||||
| 	if r.chunkData == nil { | ||||
| 		chunkData, err := r.getter.Get(Reference(r.key)) | ||||
| 		chunkData, err := r.getter.Get(cctx, Reference(r.key)) | ||||
| 		if err != nil { | ||||
| 			return 0, err | ||||
| 		} | ||||
| @@ -421,12 +443,25 @@ func (r *LazyChunkReader) Size(quitC chan bool) (n int64, err error) { | ||||
| func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { | ||||
| 	metrics.GetOrRegisterCounter("lazychunkreader.readat", nil).Inc(1) | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	var cctx context.Context | ||||
| 	cctx, sp = spancontext.StartSpan( | ||||
| 		r.Ctx, | ||||
| 		"lcr.read") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	defer func() { | ||||
| 		sp.LogFields( | ||||
| 			olog.Int("off", int(off)), | ||||
| 			olog.Int("read", read)) | ||||
| 	}() | ||||
|  | ||||
| 	// this is correct, a swarm doc cannot be zero length, so no EOF is expected | ||||
| 	if len(b) == 0 { | ||||
| 		return 0, nil | ||||
| 	} | ||||
| 	quitC := make(chan bool) | ||||
| 	size, err := r.Size(quitC) | ||||
| 	size, err := r.Size(cctx, quitC) | ||||
| 	if err != nil { | ||||
| 		log.Error("lazychunkreader.readat.size", "size", size, "err", err) | ||||
| 		return 0, err | ||||
| @@ -449,7 +484,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { | ||||
| 		length *= r.chunkSize | ||||
| 	} | ||||
| 	wg.Add(1) | ||||
| 	go r.join(b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) | ||||
| 	go r.join(cctx, b, off, off+length, depth, treeSize/r.branches, r.chunkData, &wg, errC, quitC) | ||||
| 	go func() { | ||||
| 		wg.Wait() | ||||
| 		close(errC) | ||||
| @@ -467,7 +502,7 @@ func (r *LazyChunkReader) ReadAt(b []byte, off int64) (read int, err error) { | ||||
| 	return len(b), nil | ||||
| } | ||||
|  | ||||
| func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { | ||||
| func (r *LazyChunkReader) join(ctx context.Context, b []byte, off int64, eoff int64, depth int, treeSize int64, chunkData ChunkData, parentWg *sync.WaitGroup, errC chan error, quitC chan bool) { | ||||
| 	defer parentWg.Done() | ||||
| 	// find appropriate block level | ||||
| 	for chunkData.Size() < treeSize && depth > r.depth { | ||||
| @@ -514,7 +549,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS | ||||
| 		wg.Add(1) | ||||
| 		go func(j int64) { | ||||
| 			childKey := chunkData[8+j*r.hashSize : 8+(j+1)*r.hashSize] | ||||
| 			chunkData, err := r.getter.Get(Reference(childKey)) | ||||
| 			chunkData, err := r.getter.Get(ctx, Reference(childKey)) | ||||
| 			if err != nil { | ||||
| 				log.Error("lazychunkreader.join", "key", fmt.Sprintf("%x", childKey), "err", err) | ||||
| 				select { | ||||
| @@ -533,7 +568,7 @@ func (r *LazyChunkReader) join(b []byte, off int64, eoff int64, depth int, treeS | ||||
| 			if soff < off { | ||||
| 				soff = off | ||||
| 			} | ||||
| 			r.join(b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) | ||||
| 			r.join(ctx, b[soff-off:seoff-off], soff-roff, seoff-roff, depth-1, treeSize/r.branches, chunkData, wg, errC, quitC) | ||||
| 		}(i) | ||||
| 	} //for | ||||
| } | ||||
| @@ -570,7 +605,7 @@ func (r *LazyChunkReader) Seek(offset int64, whence int) (int64, error) { | ||||
| 		offset += r.off | ||||
| 	case 2: | ||||
| 		if r.chunkData == nil { //seek from the end requires rootchunk for size. call Size first | ||||
| 			_, err := r.Size(nil) | ||||
| 			_, err := r.Size(context.TODO(), nil) | ||||
| 			if err != nil { | ||||
| 				return 0, fmt.Errorf("can't get size: %v", err) | ||||
| 			} | ||||
|   | ||||
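Aside: ReadAt above both starts a span and records the offset and the number of bytes read on it via LogFields. A standalone sketch of that pattern (the helper name and the read callback are illustrative):

    package example

    import (
    	"context"

    	"github.com/ethereum/go-ethereum/swarm/spancontext"
    	olog "github.com/opentracing/opentracing-go/log"
    )

    // tracedRead wraps an arbitrary read function in a span and attaches the
    // offset and the number of bytes actually read as structured span fields.
    func tracedRead(ctx context.Context, readAt func([]byte, int64) (int, error), b []byte, off int64) (n int, err error) {
    	_, sp := spancontext.StartSpan(ctx, "example.read")
    	defer sp.Finish()
    	defer func() {
    		sp.LogFields(
    			olog.Int("off", int(off)),
    			olog.Int("read", n))
    	}()
    	return readAt(b, off)
    }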
| @@ -50,11 +50,11 @@ type fakeChunkStore struct { | ||||
| } | ||||
|  | ||||
| // Put doesn't store anything it is just here to implement ChunkStore | ||||
| func (f *fakeChunkStore) Put(*Chunk) { | ||||
| func (f *fakeChunkStore) Put(context.Context, *Chunk) { | ||||
| } | ||||
|  | ||||
| // Get doesn't retrieve anything, it is just here to implement ChunkStore | ||||
| func (f *fakeChunkStore) Get(Address) (*Chunk, error) { | ||||
| func (f *fakeChunkStore) Get(context.Context, Address) (*Chunk, error) { | ||||
| 	return nil, errors.New("FakeChunkStore doesn't support Get") | ||||
| } | ||||
|  | ||||
| @@ -281,7 +281,7 @@ func TestRandomBrokenData(t *testing.T) { | ||||
| } | ||||
|  | ||||
| func benchReadAll(reader LazySectionReader) { | ||||
| 	size, _ := reader.Size(nil) | ||||
| 	size, _ := reader.Size(context.TODO(), nil) | ||||
| 	output := make([]byte, 1000) | ||||
| 	for pos := int64(0); pos < size; pos += 1000 { | ||||
| 		reader.ReadAt(output, pos) | ||||
|   | ||||
| @@ -16,7 +16,10 @@ | ||||
|  | ||||
| package storage | ||||
|  | ||||
| import "sync" | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| /* | ||||
| ChunkStore interface is implemented by : | ||||
| @@ -28,8 +31,8 @@ ChunkStore interface is implemented by : | ||||
| - FakeChunkStore: dummy store which doesn't store anything just implements the interface | ||||
| */ | ||||
| type ChunkStore interface { | ||||
| 	Put(*Chunk) // effectively there is no error even if there is an error | ||||
| 	Get(Address) (*Chunk, error) | ||||
| 	Put(context.Context, *Chunk) // effectively there is no error even if there is an error | ||||
| 	Get(context.Context, Address) (*Chunk, error) | ||||
| 	Close() | ||||
| } | ||||
|  | ||||
| @@ -45,14 +48,14 @@ func NewMapChunkStore() *MapChunkStore { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *MapChunkStore) Put(chunk *Chunk) { | ||||
| func (m *MapChunkStore) Put(ctx context.Context, chunk *Chunk) { | ||||
| 	m.mu.Lock() | ||||
| 	defer m.mu.Unlock() | ||||
| 	m.chunks[chunk.Addr.Hex()] = chunk | ||||
| 	chunk.markAsStored() | ||||
| } | ||||
|  | ||||
| func (m *MapChunkStore) Get(addr Address) (*Chunk, error) { | ||||
| func (m *MapChunkStore) Get(ctx context.Context, addr Address) (*Chunk, error) { | ||||
| 	m.mu.RLock() | ||||
| 	defer m.mu.RUnlock() | ||||
| 	chunk := m.chunks[addr.Hex()] | ||||
|   | ||||
| @@ -16,6 +16,7 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
|  | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| @@ -37,7 +38,7 @@ func PutChunks(store *LocalStore, chunks ...*Chunk) { | ||||
| 		} | ||||
| 	}() | ||||
| 	for _, c := range chunks { | ||||
| 		go store.Put(c) | ||||
| 		go store.Put(context.TODO(), c) | ||||
| 	} | ||||
| 	wg.Wait() | ||||
| } | ||||
|   | ||||
| @@ -18,6 +18,7 @@ package storage | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"crypto/rand" | ||||
| 	"flag" | ||||
| 	"fmt" | ||||
| @@ -69,7 +70,7 @@ func mput(store ChunkStore, processors int, n int, f func(i int64) *Chunk) (hs [ | ||||
| 			for chunk := range c { | ||||
| 				wg.Add(1) | ||||
| 				chunk := chunk | ||||
| 				store.Put(chunk) | ||||
| 				store.Put(context.TODO(), chunk) | ||||
| 				go func() { | ||||
| 					defer wg.Done() | ||||
| 					<-chunk.dbStoredC | ||||
| @@ -103,7 +104,7 @@ func mget(store ChunkStore, hs []Address, f func(h Address, chunk *Chunk) error) | ||||
| 	for _, k := range hs { | ||||
| 		go func(h Address) { | ||||
| 			defer wg.Done() | ||||
| 			chunk, err := store.Get(h) | ||||
| 			chunk, err := store.Get(context.TODO(), h) | ||||
| 			if err != nil { | ||||
| 				errc <- err | ||||
| 				return | ||||
|   | ||||
| @@ -16,6 +16,8 @@ | ||||
|  | ||||
| package storage | ||||
|  | ||||
| import "context" | ||||
|  | ||||
| // wrapper of db-s to provide mockable custom local chunk store access to syncer | ||||
| type DBAPI struct { | ||||
| 	db  *LDBStore | ||||
| @@ -27,8 +29,8 @@ func NewDBAPI(loc *LocalStore) *DBAPI { | ||||
| } | ||||
|  | ||||
| // to obtain the chunks from address or request db entry only | ||||
| func (d *DBAPI) Get(addr Address) (*Chunk, error) { | ||||
| 	return d.loc.Get(addr) | ||||
| func (d *DBAPI) Get(ctx context.Context, addr Address) (*Chunk, error) { | ||||
| 	return d.loc.Get(ctx, addr) | ||||
| } | ||||
|  | ||||
| // current storage counter of chunk db | ||||
| @@ -42,11 +44,11 @@ func (d *DBAPI) Iterator(from uint64, to uint64, po uint8, f func(Address, uint6 | ||||
| } | ||||
|  | ||||
| // to obtain the chunks from address or request db entry only | ||||
| func (d *DBAPI) GetOrCreateRequest(addr Address) (*Chunk, bool) { | ||||
| 	return d.loc.GetOrCreateRequest(addr) | ||||
| func (d *DBAPI) GetOrCreateRequest(ctx context.Context, addr Address) (*Chunk, bool) { | ||||
| 	return d.loc.GetOrCreateRequest(ctx, addr) | ||||
| } | ||||
|  | ||||
| // to obtain the chunks from key or request db entry only | ||||
| func (d *DBAPI) Put(chunk *Chunk) { | ||||
| 	d.loc.Put(chunk) | ||||
| func (d *DBAPI) Put(ctx context.Context, chunk *Chunk) { | ||||
| 	d.loc.Put(ctx, chunk) | ||||
| } | ||||
|   | ||||
| @@ -74,7 +74,7 @@ func NewHasherStore(chunkStore ChunkStore, hashFunc SwarmHasher, toEncrypt bool) | ||||
| // Put stores the chunkData into the ChunkStore of the hasherStore and returns the reference. | ||||
| // If hasherStore has a chunkEncryption object, the data will be encrypted. | ||||
| // Asynchronous function, the data will not necessarily be stored when it returns. | ||||
| func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { | ||||
| func (h *hasherStore) Put(ctx context.Context, chunkData ChunkData) (Reference, error) { | ||||
| 	c := chunkData | ||||
| 	size := chunkData.Size() | ||||
| 	var encryptionKey encryption.Key | ||||
| @@ -87,7 +87,7 @@ func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { | ||||
| 	} | ||||
| 	chunk := h.createChunk(c, size) | ||||
|  | ||||
| 	h.storeChunk(chunk) | ||||
| 	h.storeChunk(ctx, chunk) | ||||
|  | ||||
| 	return Reference(append(chunk.Addr, encryptionKey...)), nil | ||||
| } | ||||
| @@ -95,14 +95,14 @@ func (h *hasherStore) Put(chunkData ChunkData) (Reference, error) { | ||||
| // Get returns data of the chunk with the given reference (retrieved from the ChunkStore of hasherStore). | ||||
| // If the data is encrypted and the reference contains an encryption key, it will be decrypted before | ||||
| // return. | ||||
| func (h *hasherStore) Get(ref Reference) (ChunkData, error) { | ||||
| func (h *hasherStore) Get(ctx context.Context, ref Reference) (ChunkData, error) { | ||||
| 	key, encryptionKey, err := parseReference(ref, h.hashSize) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	toDecrypt := (encryptionKey != nil) | ||||
|  | ||||
| 	chunk, err := h.store.Get(key) | ||||
| 	chunk, err := h.store.Get(ctx, key) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| @@ -207,13 +207,13 @@ func (h *hasherStore) RefSize() int64 { | ||||
| 	return h.refSize | ||||
| } | ||||
|  | ||||
| func (h *hasherStore) storeChunk(chunk *Chunk) { | ||||
| func (h *hasherStore) storeChunk(ctx context.Context, chunk *Chunk) { | ||||
| 	h.wg.Add(1) | ||||
| 	go func() { | ||||
| 		<-chunk.dbStoredC | ||||
| 		h.wg.Done() | ||||
| 	}() | ||||
| 	h.store.Put(chunk) | ||||
| 	h.store.Put(ctx, chunk) | ||||
| } | ||||
|  | ||||
| func parseReference(ref Reference, hashSize int) (Address, encryption.Key, error) { | ||||
|   | ||||
| @@ -47,13 +47,13 @@ func TestHasherStore(t *testing.T) { | ||||
|  | ||||
| 		// Put two random chunks into the hasherStore | ||||
| 		chunkData1 := GenerateRandomChunk(int64(tt.chunkLength)).SData | ||||
| 		key1, err := hasherStore.Put(chunkData1) | ||||
| 		key1, err := hasherStore.Put(context.TODO(), chunkData1) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error got \"%v\"", err) | ||||
| 		} | ||||
|  | ||||
| 		chunkData2 := GenerateRandomChunk(int64(tt.chunkLength)).SData | ||||
| 		key2, err := hasherStore.Put(chunkData2) | ||||
| 		key2, err := hasherStore.Put(context.TODO(), chunkData2) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error got \"%v\"", err) | ||||
| 		} | ||||
| @@ -67,7 +67,7 @@ func TestHasherStore(t *testing.T) { | ||||
| 		} | ||||
|  | ||||
| 		// Get the first chunk | ||||
| 		retrievedChunkData1, err := hasherStore.Get(key1) | ||||
| 		retrievedChunkData1, err := hasherStore.Get(context.TODO(), key1) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error, got \"%v\"", err) | ||||
| 		} | ||||
| @@ -78,7 +78,7 @@ func TestHasherStore(t *testing.T) { | ||||
| 		} | ||||
|  | ||||
| 		// Get the second chunk | ||||
| 		retrievedChunkData2, err := hasherStore.Get(key2) | ||||
| 		retrievedChunkData2, err := hasherStore.Get(context.TODO(), key2) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error, got \"%v\"", err) | ||||
| 		} | ||||
| @@ -105,7 +105,7 @@ func TestHasherStore(t *testing.T) { | ||||
| 		} | ||||
|  | ||||
| 		// Check if chunk data in store is encrypted or not | ||||
| 		chunkInStore, err := chunkStore.Get(hash1) | ||||
| 		chunkInStore, err := chunkStore.Get(context.TODO(), hash1) | ||||
| 		if err != nil { | ||||
| 			t.Fatalf("Expected no error got \"%v\"", err) | ||||
| 		} | ||||
|   | ||||
| @@ -25,6 +25,7 @@ package storage | ||||
| import ( | ||||
| 	"archive/tar" | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"encoding/binary" | ||||
| 	"encoding/hex" | ||||
| 	"fmt" | ||||
| @@ -370,7 +371,7 @@ func (s *LDBStore) Import(in io.Reader) (int64, error) { | ||||
| 		key := Address(keybytes) | ||||
| 		chunk := NewChunk(key, nil) | ||||
| 		chunk.SData = data[32:] | ||||
| 		s.Put(chunk) | ||||
| 		s.Put(context.TODO(), chunk) | ||||
| 		wg.Add(1) | ||||
| 		go func() { | ||||
| 			defer wg.Done() | ||||
| @@ -499,7 +500,7 @@ func (s *LDBStore) CurrentStorageIndex() uint64 { | ||||
| 	return s.dataIdx | ||||
| } | ||||
|  | ||||
| func (s *LDBStore) Put(chunk *Chunk) { | ||||
| func (s *LDBStore) Put(ctx context.Context, chunk *Chunk) { | ||||
| 	metrics.GetOrRegisterCounter("ldbstore.put", nil).Inc(1) | ||||
| 	log.Trace("ldbstore.put", "key", chunk.Addr) | ||||
|  | ||||
| @@ -639,7 +640,7 @@ func (s *LDBStore) tryAccessIdx(ikey []byte, index *dpaDBIndex) bool { | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func (s *LDBStore) Get(addr Address) (chunk *Chunk, err error) { | ||||
| func (s *LDBStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { | ||||
| 	metrics.GetOrRegisterCounter("ldbstore.get", nil).Inc(1) | ||||
| 	log.Trace("ldbstore.get", "key", addr) | ||||
|  | ||||
|   | ||||
| @@ -18,6 +18,7 @@ package storage | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"context" | ||||
| 	"fmt" | ||||
| 	"io/ioutil" | ||||
| 	"os" | ||||
| @@ -157,7 +158,7 @@ func testDbStoreNotFound(t *testing.T, mock bool) { | ||||
| 		t.Fatalf("init dbStore failed: %v", err) | ||||
| 	} | ||||
|  | ||||
| 	_, err = db.Get(ZeroAddr) | ||||
| 	_, err = db.Get(context.TODO(), ZeroAddr) | ||||
| 	if err != ErrChunkNotFound { | ||||
| 		t.Errorf("Expected ErrChunkNotFound, got %v", err) | ||||
| 	} | ||||
| @@ -188,7 +189,7 @@ func testIterator(t *testing.T, mock bool) { | ||||
| 	wg := &sync.WaitGroup{} | ||||
| 	wg.Add(len(chunks)) | ||||
| 	for i = 0; i < len(chunks); i++ { | ||||
| 		db.Put(chunks[i]) | ||||
| 		db.Put(context.TODO(), chunks[i]) | ||||
| 		chunkkeys[i] = chunks[i].Addr | ||||
| 		j := i | ||||
| 		go func() { | ||||
| @@ -299,7 +300,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		go ldb.Put(chunks[i]) | ||||
| 		go ldb.Put(context.TODO(), chunks[i]) | ||||
| 	} | ||||
|  | ||||
| 	// wait for all chunks to be stored | ||||
| @@ -310,7 +311,7 @@ func TestLDBStoreWithoutCollectGarbage(t *testing.T) { | ||||
| 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ret, err := ldb.Get(chunks[i].Addr) | ||||
| 		ret, err := ldb.Get(context.TODO(), chunks[i].Addr) | ||||
| 		if err != nil { | ||||
| 			t.Fatal(err) | ||||
| 		} | ||||
| @@ -349,7 +350,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ldb.Put(chunks[i]) | ||||
| 		ldb.Put(context.TODO(), chunks[i]) | ||||
| 	} | ||||
|  | ||||
| 	// wait for all chunks to be stored | ||||
| @@ -364,7 +365,7 @@ func TestLDBStoreCollectGarbage(t *testing.T) { | ||||
|  | ||||
| 	var missing int | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ret, err := ldb.Get(chunks[i].Addr) | ||||
| 		ret, err := ldb.Get(context.TODO(), chunks[i].Addr) | ||||
| 		if err == ErrChunkNotFound || err == ldberrors.ErrNotFound { | ||||
| 			missing++ | ||||
| 			continue | ||||
| @@ -403,7 +404,7 @@ func TestLDBStoreAddRemove(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		go ldb.Put(chunks[i]) | ||||
| 		go ldb.Put(context.TODO(), chunks[i]) | ||||
| 	} | ||||
|  | ||||
| 	// wait for all chunks to be stored before continuing | ||||
| @@ -428,7 +429,7 @@ func TestLDBStoreAddRemove(t *testing.T) { | ||||
| 	log.Info("ldbstore", "entrycnt", ldb.entryCnt, "accesscnt", ldb.accessCnt) | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ret, err := ldb.Get(chunks[i].Addr) | ||||
| 		ret, err := ldb.Get(context.TODO(), chunks[i].Addr) | ||||
|  | ||||
| 		if i%2 == 0 { | ||||
| 			// expect even chunks to be missing | ||||
| @@ -465,7 +466,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ldb.Put(chunks[i]) | ||||
| 		ldb.Put(context.TODO(), chunks[i]) | ||||
| 	} | ||||
|  | ||||
| 	// wait for all chunks to be stored before continuing | ||||
| @@ -494,7 +495,7 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { | ||||
| 	n = 10 | ||||
|  | ||||
| 	for i := 0; i < n; i++ { | ||||
| 		ldb.Put(chunks[i]) | ||||
| 		ldb.Put(context.TODO(), chunks[i]) | ||||
| 	} | ||||
|  | ||||
| 	// wait for all chunks to be stored before continuing | ||||
| @@ -504,14 +505,14 @@ func TestLDBStoreRemoveThenCollectGarbage(t *testing.T) { | ||||
|  | ||||
| 	// expect the first chunk to be missing, because it has the smallest access value | ||||
| 	idx := 0 | ||||
| 	ret, err := ldb.Get(chunks[idx].Addr) | ||||
| 	ret, err := ldb.Get(context.TODO(), chunks[idx].Addr) | ||||
| 	if err == nil || ret != nil { | ||||
| 		t.Fatal("expected first chunk to be missing, but got no error") | ||||
| 	} | ||||
|  | ||||
| 	// expect the last chunk to be present, as it has the largest access value | ||||
| 	idx = 9 | ||||
| 	ret, err = ldb.Get(chunks[idx].Addr) | ||||
| 	ret, err = ldb.Get(context.TODO(), chunks[idx].Addr) | ||||
| 	if err != nil { | ||||
| 		t.Fatalf("expected no error, but got %s", err) | ||||
| 	} | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/binary" | ||||
| 	"fmt" | ||||
| 	"path/filepath" | ||||
| @@ -96,7 +97,7 @@ func NewTestLocalStoreForAddr(params *LocalStoreParams) (*LocalStore, error) { | ||||
| // when the chunk is stored in memstore. | ||||
| // After the LDBStore.Put, it is ensured that the MemStore | ||||
| // contains the chunk with the same data, but nil ReqC channel. | ||||
| func (ls *LocalStore) Put(chunk *Chunk) { | ||||
| func (ls *LocalStore) Put(ctx context.Context, chunk *Chunk) { | ||||
| 	if l := len(chunk.SData); l < 9 { | ||||
| 		log.Debug("incomplete chunk data", "addr", chunk.Addr, "length", l) | ||||
| 		chunk.SetErrored(ErrChunkInvalid) | ||||
| @@ -123,7 +124,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { | ||||
|  | ||||
| 	chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) | ||||
|  | ||||
| 	memChunk, err := ls.memStore.Get(chunk.Addr) | ||||
| 	memChunk, err := ls.memStore.Get(ctx, chunk.Addr) | ||||
| 	switch err { | ||||
| 	case nil: | ||||
| 		if memChunk.ReqC == nil { | ||||
| @@ -136,7 +137,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { | ||||
| 		return | ||||
| 	} | ||||
|  | ||||
| 	ls.DbStore.Put(chunk) | ||||
| 	ls.DbStore.Put(ctx, chunk) | ||||
|  | ||||
| 	// chunk is no longer a request, but a chunk with data, so replace it in memStore | ||||
| 	newc := NewChunk(chunk.Addr, nil) | ||||
| @@ -144,7 +145,7 @@ func (ls *LocalStore) Put(chunk *Chunk) { | ||||
| 	newc.Size = chunk.Size | ||||
| 	newc.dbStoredC = chunk.dbStoredC | ||||
|  | ||||
| 	ls.memStore.Put(newc) | ||||
| 	ls.memStore.Put(ctx, newc) | ||||
|  | ||||
| 	if memChunk != nil && memChunk.ReqC != nil { | ||||
| 		close(memChunk.ReqC) | ||||
| @@ -155,15 +156,15 @@ func (ls *LocalStore) Put(chunk *Chunk) { | ||||
| // This method is blocking until the chunk is retrieved | ||||
| // so additional timeout may be needed to wrap this call if | ||||
| // ChunkStores are remote and can have long latency | ||||
| func (ls *LocalStore) Get(addr Address) (chunk *Chunk, err error) { | ||||
| func (ls *LocalStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { | ||||
| 	ls.mu.Lock() | ||||
| 	defer ls.mu.Unlock() | ||||
|  | ||||
| 	return ls.get(addr) | ||||
| 	return ls.get(ctx, addr) | ||||
| } | ||||
|  | ||||
| func (ls *LocalStore) get(addr Address) (chunk *Chunk, err error) { | ||||
| 	chunk, err = ls.memStore.Get(addr) | ||||
| func (ls *LocalStore) get(ctx context.Context, addr Address) (chunk *Chunk, err error) { | ||||
| 	chunk, err = ls.memStore.Get(ctx, addr) | ||||
| 	if err == nil { | ||||
| 		if chunk.ReqC != nil { | ||||
| 			select { | ||||
| @@ -177,25 +178,25 @@ func (ls *LocalStore) get(addr Address) (chunk *Chunk, err error) { | ||||
| 		return | ||||
| 	} | ||||
| 	metrics.GetOrRegisterCounter("localstore.get.cachemiss", nil).Inc(1) | ||||
| 	chunk, err = ls.DbStore.Get(addr) | ||||
| 	chunk, err = ls.DbStore.Get(ctx, addr) | ||||
| 	if err != nil { | ||||
| 		metrics.GetOrRegisterCounter("localstore.get.error", nil).Inc(1) | ||||
| 		return | ||||
| 	} | ||||
| 	chunk.Size = int64(binary.LittleEndian.Uint64(chunk.SData[0:8])) | ||||
| 	ls.memStore.Put(chunk) | ||||
| 	ls.memStore.Put(ctx, chunk) | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // retrieve logic common for local and network chunk retrieval requests | ||||
| func (ls *LocalStore) GetOrCreateRequest(addr Address) (chunk *Chunk, created bool) { | ||||
| func (ls *LocalStore) GetOrCreateRequest(ctx context.Context, addr Address) (chunk *Chunk, created bool) { | ||||
| 	metrics.GetOrRegisterCounter("localstore.getorcreaterequest", nil).Inc(1) | ||||
|  | ||||
| 	ls.mu.Lock() | ||||
| 	defer ls.mu.Unlock() | ||||
|  | ||||
| 	var err error | ||||
| 	chunk, err = ls.get(addr) | ||||
| 	chunk, err = ls.get(ctx, addr) | ||||
| 	if err == nil && chunk.GetErrored() == nil { | ||||
| 		metrics.GetOrRegisterCounter("localstore.getorcreaterequest.hit", nil).Inc(1) | ||||
| 		log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v found locally", addr)) | ||||
| @@ -210,7 +211,7 @@ func (ls *LocalStore) GetOrCreateRequest(addr Address) (chunk *Chunk, created bo | ||||
| 	metrics.GetOrRegisterCounter("localstore.getorcreaterequest.miss", nil).Inc(1) | ||||
| 	log.Trace(fmt.Sprintf("LocalStore.GetOrRetrieve: %v not found locally. open new request", addr)) | ||||
| 	chunk = NewChunk(addr, make(chan bool)) | ||||
| 	ls.memStore.Put(chunk) | ||||
| 	ls.memStore.Put(ctx, chunk) | ||||
| 	return chunk, true | ||||
| } | ||||
|  | ||||
|   | ||||
| @@ -19,6 +19,7 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"sync" | ||||
|  | ||||
| 	lru "github.com/hashicorp/golang-lru" | ||||
| @@ -68,7 +69,7 @@ func NewMemStore(params *StoreParams, _ *LDBStore) (m *MemStore) { | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *MemStore) Get(addr Address) (*Chunk, error) { | ||||
| func (m *MemStore) Get(ctx context.Context, addr Address) (*Chunk, error) { | ||||
| 	if m.disabled { | ||||
| 		return nil, ErrChunkNotFound | ||||
| 	} | ||||
| @@ -90,7 +91,7 @@ func (m *MemStore) Get(addr Address) (*Chunk, error) { | ||||
| 	return c.(*Chunk), nil | ||||
| } | ||||
|  | ||||
| func (m *MemStore) Put(c *Chunk) { | ||||
| func (m *MemStore) Put(ctx context.Context, c *Chunk) { | ||||
| 	if m.disabled { | ||||
| 		return | ||||
| 	} | ||||
|   | ||||
| @@ -17,6 +17,7 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"crypto/rand" | ||||
| 	"encoding/binary" | ||||
| 	"io/ioutil" | ||||
| @@ -72,7 +73,7 @@ func TestMemStoreNotFound(t *testing.T) { | ||||
| 	m := newTestMemStore() | ||||
| 	defer m.Close() | ||||
|  | ||||
| 	_, err := m.Get(ZeroAddr) | ||||
| 	_, err := m.Get(context.TODO(), ZeroAddr) | ||||
| 	if err != ErrChunkNotFound { | ||||
| 		t.Errorf("Expected ErrChunkNotFound, got %v", err) | ||||
| 	} | ||||
| @@ -187,8 +188,8 @@ func TestMemStoreAndLDBStore(t *testing.T) { | ||||
| 		} | ||||
|  | ||||
| 		for i := 0; i < tt.n; i++ { | ||||
| 			go ldb.Put(chunks[i]) | ||||
| 			memStore.Put(chunks[i]) | ||||
| 			go ldb.Put(context.TODO(), chunks[i]) | ||||
| 			memStore.Put(context.TODO(), chunks[i]) | ||||
|  | ||||
| 			if got := memStore.cache.Len(); got > cacheCap { | ||||
| 				t.Fatalf("expected to get cache capacity less than %v, but got %v", cacheCap, got) | ||||
| @@ -200,10 +201,10 @@ func TestMemStoreAndLDBStore(t *testing.T) { | ||||
| 		} | ||||
|  | ||||
| 		for i := 0; i < tt.n; i++ { | ||||
| 			_, err := memStore.Get(chunks[i].Addr) | ||||
| 			_, err := memStore.Get(context.TODO(), chunks[i].Addr) | ||||
| 			if err != nil { | ||||
| 				if err == ErrChunkNotFound { | ||||
| 					_, err := ldb.Get(chunks[i].Addr) | ||||
| 					_, err := ldb.Get(context.TODO(), chunks[i].Addr) | ||||
| 					if err != nil { | ||||
| 						t.Fatalf("couldn't get chunk %v from ldb, got error: %v", i, err) | ||||
| 					} | ||||
|   | ||||
| @@ -125,6 +125,10 @@ type resource struct { | ||||
| 	updated    time.Time | ||||
| } | ||||
|  | ||||
| func (r *resource) Context() context.Context { | ||||
| 	return context.TODO() | ||||
| } | ||||
|  | ||||
| // TODO Expire content after a defined period (to force resync) | ||||
| func (r *resource) isSynced() bool { | ||||
| 	return !r.updated.IsZero() | ||||
| @@ -134,7 +138,7 @@ func (r *resource) NameHash() common.Hash { | ||||
| 	return r.nameHash | ||||
| } | ||||
|  | ||||
| func (r *resource) Size(chan bool) (int64, error) { | ||||
| func (r *resource) Size(context.Context, chan bool) (int64, error) { | ||||
| 	if !r.isSynced() { | ||||
| 		return 0, NewError(ErrNotSynced, "Not synced") | ||||
| 	} | ||||
| @@ -413,7 +417,7 @@ func (h *Handler) New(ctx context.Context, name string, frequency uint64) (stora | ||||
|  | ||||
| 	chunk := h.newMetaChunk(name, currentblock, frequency) | ||||
|  | ||||
| 	h.chunkStore.Put(chunk) | ||||
| 	h.chunkStore.Put(ctx, chunk) | ||||
| 	log.Debug("new resource", "name", name, "key", nameHash, "startBlock", currentblock, "frequency", frequency) | ||||
|  | ||||
| 	// create the internal index for the resource and populate it with the data of the first version | ||||
| @@ -593,7 +597,7 @@ func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh | ||||
| 			return nil, NewError(ErrPeriodDepth, fmt.Sprintf("Lookup exceeded max period hops (%d)", maxLookup.Max)) | ||||
| 		} | ||||
| 		key := h.resourceHash(period, version, rsrc.nameHash) | ||||
| 		chunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout) | ||||
| 		chunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout) | ||||
| 		if err == nil { | ||||
| 			if specificversion { | ||||
| 				return h.updateIndex(rsrc, chunk) | ||||
| @@ -603,7 +607,7 @@ func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh | ||||
| 			for { | ||||
| 				newversion := version + 1 | ||||
| 				key := h.resourceHash(period, newversion, rsrc.nameHash) | ||||
| 				newchunk, err := h.chunkStore.GetWithTimeout(key, defaultRetrieveTimeout) | ||||
| 				newchunk, err := h.chunkStore.GetWithTimeout(context.TODO(), key, defaultRetrieveTimeout) | ||||
| 				if err != nil { | ||||
| 					return h.updateIndex(rsrc, chunk) | ||||
| 				} | ||||
| @@ -621,8 +625,8 @@ func (h *Handler) lookup(rsrc *resource, period uint32, version uint32, refresh | ||||
|  | ||||
| // Retrieves a resource metadata chunk and creates/updates the index entry for it | ||||
| // with the resulting metadata | ||||
| func (h *Handler) Load(addr storage.Address) (*resource, error) { | ||||
| 	chunk, err := h.chunkStore.GetWithTimeout(addr, defaultRetrieveTimeout) | ||||
| func (h *Handler) Load(ctx context.Context, addr storage.Address) (*resource, error) { | ||||
| 	chunk, err := h.chunkStore.GetWithTimeout(ctx, addr, defaultRetrieveTimeout) | ||||
| 	if err != nil { | ||||
| 		return nil, NewError(ErrNotFound, err.Error()) | ||||
| 	} | ||||
| @@ -890,7 +894,7 @@ func (h *Handler) update(ctx context.Context, name string, data []byte, multihas | ||||
| 	chunk := newUpdateChunk(key, signature, nextperiod, version, name, data, datalength) | ||||
|  | ||||
| 	// send the chunk | ||||
| 	h.chunkStore.Put(chunk) | ||||
| 	h.chunkStore.Put(ctx, chunk) | ||||
| 	log.Trace("resource update", "name", name, "key", key, "currentblock", currentblock, "lastperiod", nextperiod, "version", version, "data", chunk.SData, "multihash", multihash) | ||||
|  | ||||
| 	// update our resources map entry and return the new key | ||||
|   | ||||
| @@ -182,7 +182,7 @@ func TestHandler(t *testing.T) { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
|  | ||||
| 	chunk, err := rh.chunkStore.Get(storage.Address(rootChunkKey)) | ||||
| 	chunk, err := rh.chunkStore.Get(context.TODO(), storage.Address(rootChunkKey)) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} else if len(chunk.SData) < 16 { | ||||
| @@ -256,7 +256,7 @@ func TestHandler(t *testing.T) { | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| 	} | ||||
| 	rsrc2, err := rh2.Load(rootChunkKey) | ||||
| 	rsrc2, err := rh2.Load(context.TODO(), rootChunkKey) | ||||
| 	_, err = rh2.LookupLatest(ctx, nameHash, true, nil) | ||||
| 	if err != nil { | ||||
| 		t.Fatal(err) | ||||
| @@ -754,7 +754,7 @@ func newTestSigner() (*GenericSigner, error) { | ||||
| } | ||||
|  | ||||
| func getUpdateDirect(rh *Handler, addr storage.Address) ([]byte, error) { | ||||
| 	chunk, err := rh.chunkStore.Get(addr) | ||||
| 	chunk, err := rh.chunkStore.Get(context.TODO(), addr) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
|   | ||||
| @@ -17,9 +17,12 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/ethereum/go-ethereum/swarm/log" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/spancontext" | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| @@ -43,10 +46,10 @@ var ( | ||||
| // access by calling network is blocking with a timeout | ||||
| type NetStore struct { | ||||
| 	localStore *LocalStore | ||||
| 	retrieve   func(chunk *Chunk) error | ||||
| 	retrieve   func(ctx context.Context, chunk *Chunk) error | ||||
| } | ||||
|  | ||||
| func NewNetStore(localStore *LocalStore, retrieve func(chunk *Chunk) error) *NetStore { | ||||
| func NewNetStore(localStore *LocalStore, retrieve func(ctx context.Context, chunk *Chunk) error) *NetStore { | ||||
| 	return &NetStore{localStore, retrieve} | ||||
| } | ||||
|  | ||||
| @@ -56,7 +59,14 @@ func NewNetStore(localStore *LocalStore, retrieve func(chunk *Chunk) error) *Net | ||||
| // Get uses the get method to retrieve the request, but retries if | ||||
| // ErrChunkNotFound is returned by get, until the netStoreRetryTimeout | ||||
| // is reached. | ||||
| func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { | ||||
| func (ns *NetStore) Get(ctx context.Context, addr Address) (chunk *Chunk, err error) { | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"netstore.get.global") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	timer := time.NewTimer(netStoreRetryTimeout) | ||||
| 	defer timer.Stop() | ||||
|  | ||||
| @@ -84,7 +94,7 @@ func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { | ||||
| 		defer limiter.Stop() | ||||
|  | ||||
| 		for { | ||||
| 			chunk, err := ns.get(addr, 0) | ||||
| 			chunk, err := ns.get(ctx, addr, 0) | ||||
| 			if err != ErrChunkNotFound { | ||||
| 				// break retry only if the error is nil | ||||
| 				// or other error then ErrChunkNotFound | ||||
| @@ -122,16 +132,23 @@ func (ns *NetStore) Get(addr Address) (chunk *Chunk, err error) { | ||||
| } | ||||
|  | ||||
| // GetWithTimeout makes a single retrieval attempt for a chunk with a explicit timeout parameter | ||||
| func (ns *NetStore) GetWithTimeout(addr Address, timeout time.Duration) (chunk *Chunk, err error) { | ||||
| 	return ns.get(addr, timeout) | ||||
| func (ns *NetStore) GetWithTimeout(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { | ||||
| 	return ns.get(ctx, addr, timeout) | ||||
| } | ||||
|  | ||||
| func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err error) { | ||||
| func (ns *NetStore) get(ctx context.Context, addr Address, timeout time.Duration) (chunk *Chunk, err error) { | ||||
| 	if timeout == 0 { | ||||
| 		timeout = searchTimeout | ||||
| 	} | ||||
|  | ||||
| 	var sp opentracing.Span | ||||
| 	ctx, sp = spancontext.StartSpan( | ||||
| 		ctx, | ||||
| 		"netstore.get") | ||||
| 	defer sp.Finish() | ||||
|  | ||||
| 	if ns.retrieve == nil { | ||||
| 		chunk, err = ns.localStore.Get(addr) | ||||
| 		chunk, err = ns.localStore.Get(ctx, addr) | ||||
| 		if err == nil { | ||||
| 			return chunk, nil | ||||
| 		} | ||||
| @@ -140,14 +157,14 @@ func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err | ||||
| 		} | ||||
| 	} else { | ||||
| 		var created bool | ||||
| 		chunk, created = ns.localStore.GetOrCreateRequest(addr) | ||||
| 		chunk, created = ns.localStore.GetOrCreateRequest(ctx, addr) | ||||
|  | ||||
| 		if chunk.ReqC == nil { | ||||
| 			return chunk, nil | ||||
| 		} | ||||
|  | ||||
| 		if created { | ||||
| 			err := ns.retrieve(chunk) | ||||
| 			err := ns.retrieve(ctx, chunk) | ||||
| 			if err != nil { | ||||
| 				// mark chunk request as failed so that we can retry it later | ||||
| 				chunk.SetErrored(ErrChunkUnavailable) | ||||
| @@ -171,8 +188,8 @@ func (ns *NetStore) get(addr Address, timeout time.Duration) (chunk *Chunk, err | ||||
| } | ||||
|  | ||||
| // Put is the entrypoint for local store requests coming from storeLoop | ||||
| func (ns *NetStore) Put(chunk *Chunk) { | ||||
| 	ns.localStore.Put(chunk) | ||||
| func (ns *NetStore) Put(ctx context.Context, chunk *Chunk) { | ||||
| 	ns.localStore.Put(ctx, chunk) | ||||
| } | ||||
|  | ||||
| // Close chunk store | ||||
|   | ||||
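Aside: the comment on NetStore.Get above describes a bounded retry: keep calling the single-attempt get while it reports ErrChunkNotFound, and give up once netStoreRetryTimeout elapses. A simplified sketch of that loop shape (names and intervals are illustrative, not the exact implementation):

    package example

    import (
    	"context"
    	"errors"
    	"time"
    )

    var errChunkNotFound = errors.New("chunk not found")

    // getWithRetry retries a single-attempt getter while it keeps returning
    // errChunkNotFound, until the overall retry timeout fires.
    func getWithRetry(ctx context.Context, get func(context.Context) error, retryTimeout, interval time.Duration) error {
    	deadline := time.NewTimer(retryTimeout)
    	defer deadline.Stop()
    	tick := time.NewTicker(interval)
    	defer tick.Stop()
    	for {
    		if err := get(ctx); err != errChunkNotFound {
    			return err // nil, or an error that should not be retried
    		}
    		select {
    		case <-deadline.C:
    			return errChunkNotFound
    		case <-tick.C:
    			// try again
    		}
    	}
    }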
| @@ -17,6 +17,7 @@ | ||||
| package storage | ||||
|  | ||||
| import ( | ||||
| 	"context" | ||||
| 	"encoding/hex" | ||||
| 	"errors" | ||||
| 	"io/ioutil" | ||||
| @@ -46,7 +47,7 @@ func newDummyChunk(addr Address) *Chunk { | ||||
| 	return chunk | ||||
| } | ||||
|  | ||||
| func (m *mockRetrieve) retrieve(chunk *Chunk) error { | ||||
| func (m *mockRetrieve) retrieve(ctx context.Context, chunk *Chunk) error { | ||||
| 	hkey := hex.EncodeToString(chunk.Addr) | ||||
| 	m.requests[hkey] += 1 | ||||
|  | ||||
| @@ -100,7 +101,7 @@ func TestNetstoreFailedRequest(t *testing.T) { | ||||
| 	// } | ||||
|  | ||||
| 	// second call | ||||
| 	_, err = netStore.Get(key) | ||||
| 	_, err = netStore.Get(context.TODO(), key) | ||||
| 	if got := r.requests[hex.EncodeToString(key)]; got != 2 { | ||||
| 		t.Fatalf("expected to have called retrieve two times, but got: %v", got) | ||||
| 	} | ||||
| @@ -109,7 +110,7 @@ func TestNetstoreFailedRequest(t *testing.T) { | ||||
| 	} | ||||
|  | ||||
| 	// third call | ||||
| 	chunk, err := netStore.Get(key) | ||||
| 	chunk, err := netStore.Get(context.TODO(), key) | ||||
| 	if got := r.requests[hex.EncodeToString(key)]; got != 3 { | ||||
| 		t.Fatalf("expected to have called retrieve three times, but got: %v", got) | ||||
| 	} | ||||
|   | ||||
| @@ -287,7 +287,7 @@ func (pc *PyramidChunker) processor(id int64) { | ||||
| func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { | ||||
| 	log.Debug("pyramid.chunker: processChunk()", "id", id) | ||||
|  | ||||
| 	ref, err := pc.putter.Put(job.chunk) | ||||
| 	ref, err := pc.putter.Put(context.TODO(), job.chunk) | ||||
| 	if err != nil { | ||||
| 		pc.errC <- err | ||||
| 	} | ||||
| @@ -302,7 +302,7 @@ func (pc *PyramidChunker) processChunk(id int64, job *chunkJob) { | ||||
| func (pc *PyramidChunker) loadTree() error { | ||||
| 	log.Debug("pyramid.chunker: loadTree()") | ||||
| 	// Get the root chunk to get the total size | ||||
| 	chunkData, err := pc.getter.Get(Reference(pc.key)) | ||||
| 	chunkData, err := pc.getter.Get(context.TODO(), Reference(pc.key)) | ||||
| 	if err != nil { | ||||
| 		return errLoadingTreeRootChunk | ||||
| 	} | ||||
| @@ -355,7 +355,7 @@ func (pc *PyramidChunker) loadTree() error { | ||||
| 			branchCount = int64(len(ent.chunk)-8) / pc.hashSize | ||||
| 			for i := int64(0); i < branchCount; i++ { | ||||
| 				key := ent.chunk[8+(i*pc.hashSize) : 8+((i+1)*pc.hashSize)] | ||||
| 				newChunkData, err := pc.getter.Get(Reference(key)) | ||||
| 				newChunkData, err := pc.getter.Get(context.TODO(), Reference(key)) | ||||
| 				if err != nil { | ||||
| 					return errLoadingTreeChunk | ||||
| 				} | ||||
| @@ -417,7 +417,7 @@ func (pc *PyramidChunker) prepareChunks(isAppend bool) { | ||||
| 			lastKey := parent.chunk[8+lastBranch*pc.hashSize : 8+(lastBranch+1)*pc.hashSize] | ||||
|  | ||||
| 			var err error | ||||
| 			unfinishedChunkData, err = pc.getter.Get(lastKey) | ||||
| 			unfinishedChunkData, err = pc.getter.Get(context.TODO(), lastKey) | ||||
| 			if err != nil { | ||||
| 				pc.errC <- err | ||||
| 			} | ||||
|   | ||||
| @@ -250,7 +250,8 @@ func GenerateRandomChunks(dataSize int64, count int) (chunks []*Chunk) { | ||||
|  | ||||
| // Size, Seek, Read, ReadAt | ||||
| type LazySectionReader interface { | ||||
| 	Size(chan bool) (int64, error) | ||||
| 	Context() context.Context | ||||
| 	Size(context.Context, chan bool) (int64, error) | ||||
| 	io.Seeker | ||||
| 	io.Reader | ||||
| 	io.ReaderAt | ||||
| @@ -260,10 +261,14 @@ type LazyTestSectionReader struct { | ||||
| 	*io.SectionReader | ||||
| } | ||||
|  | ||||
| func (r *LazyTestSectionReader) Size(chan bool) (int64, error) { | ||||
| func (r *LazyTestSectionReader) Size(context.Context, chan bool) (int64, error) { | ||||
| 	return r.SectionReader.Size(), nil | ||||
| } | ||||
|  | ||||
| func (r *LazyTestSectionReader) Context() context.Context { | ||||
| 	return context.TODO() | ||||
| } | ||||
|  | ||||
| type StoreParams struct { | ||||
| 	Hash                       SwarmHasher `toml:"-"` | ||||
| 	DbCapacity                 uint64 | ||||
| @@ -298,7 +303,7 @@ type Reference []byte | ||||
|  | ||||
| // Putter is responsible for storing data and creating a reference for it | ||||
| type Putter interface { | ||||
| 	Put(ChunkData) (Reference, error) | ||||
| 	Put(context.Context, ChunkData) (Reference, error) | ||||
| 	// RefSize returns the length of the Reference created by this Putter | ||||
| 	RefSize() int64 | ||||
| 	// Close is to indicate that no more chunk data will be Put on this Putter | ||||
| @@ -309,7 +314,7 @@ type Putter interface { | ||||
|  | ||||
| // Getter is an interface to retrieve a chunk's data by its reference | ||||
| type Getter interface { | ||||
| 	Get(Reference) (ChunkData, error) | ||||
| 	Get(context.Context, Reference) (ChunkData, error) | ||||
| } | ||||
|  | ||||
| // NOTE: this returns invalid data if chunk is encrypted | ||||
|   | ||||
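Because Putter and Getter now receive a context.Context as their first argument, implementations gain a natural place to honour cancellation and to pick up any tracing span carried in the context. A toy in-memory store with the new Put/Get shape might look like the sketch below; ChunkData and Reference are redeclared locally so it compiles on its own, and the SHA-256 keying is purely illustrative, not how swarm's stores actually address chunks.

```go
package main

import (
	"context"
	"crypto/sha256"
	"errors"
	"sync"
)

// Local stand-ins for the swarm/storage types, so the sketch is self-contained.
type ChunkData []byte
type Reference []byte

// memStore is a toy store honouring the context-aware Put/Get signatures:
// a cancelled or expired ctx aborts the operation before any work is done.
type memStore struct {
	mu     sync.Mutex
	chunks map[string]ChunkData
}

func newMemStore() *memStore {
	return &memStore{chunks: make(map[string]ChunkData)}
}

func (m *memStore) Put(ctx context.Context, data ChunkData) (Reference, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	ref := sha256.Sum256(data)
	m.mu.Lock()
	m.chunks[string(ref[:])] = data
	m.mu.Unlock()
	return Reference(ref[:]), nil
}

func (m *memStore) Get(ctx context.Context, ref Reference) (ChunkData, error) {
	if err := ctx.Err(); err != nil {
		return nil, err
	}
	m.mu.Lock()
	defer m.mu.Unlock()
	data, ok := m.chunks[string(ref)]
	if !ok {
		return nil, errors.New("chunk not found")
	}
	return data, nil
}

func (m *memStore) RefSize() int64 { return sha256.Size }

func main() {
	store := newMemStore()
	ref, _ := store.Put(context.Background(), ChunkData("hello"))
	_, _ = store.Get(context.Background(), ref)
}
```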
| @@ -21,6 +21,7 @@ import ( | ||||
| 	"context" | ||||
| 	"crypto/ecdsa" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"math/big" | ||||
| 	"net" | ||||
| 	"path/filepath" | ||||
| @@ -50,6 +51,7 @@ import ( | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage/mock" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/storage/mru" | ||||
| 	"github.com/ethereum/go-ethereum/swarm/tracing" | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| @@ -76,6 +78,8 @@ type Swarm struct { | ||||
| 	lstore      *storage.LocalStore // local store, needs to store for releasing resources after node stopped | ||||
| 	sfs         *fuse.SwarmFS       // need this to cleanup all the active mounts on node exit | ||||
| 	ps          *pss.Pss | ||||
|  | ||||
| 	tracerClose io.Closer | ||||
| } | ||||
|  | ||||
| type SwarmAPI struct { | ||||
| @@ -356,6 +360,8 @@ Start is called when the stack is started | ||||
| func (self *Swarm) Start(srv *p2p.Server) error { | ||||
| 	startTime = time.Now() | ||||
|  | ||||
| 	self.tracerClose = tracing.Closer | ||||
|  | ||||
| 	// update uaddr to correct enode | ||||
| 	newaddr := self.bzz.UpdateLocalAddr([]byte(srv.Self().String())) | ||||
| 	log.Warn("Updated bzz local addr", "oaddr", fmt.Sprintf("%x", newaddr.OAddr), "uaddr", fmt.Sprintf("%s", newaddr.UAddr)) | ||||
| @@ -424,6 +430,13 @@ func (self *Swarm) updateGauges() { | ||||
| // implements the node.Service interface | ||||
| // stops all component services. | ||||
| func (self *Swarm) Stop() error { | ||||
| 	if self.tracerClose != nil { | ||||
| 		err := self.tracerClose.Close() | ||||
| 		if err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if self.ps != nil { | ||||
| 		self.ps.Stop() | ||||
| 	} | ||||
|   | ||||
swarm/tracing/tracing.go (new file, 103 lines)
							| @@ -0,0 +1,103 @@ | ||||
| package tracing | ||||
|  | ||||
| import ( | ||||
| 	"io" | ||||
| 	"os" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/ethereum/go-ethereum/log" | ||||
| 	jaeger "github.com/uber/jaeger-client-go" | ||||
| 	jaegercfg "github.com/uber/jaeger-client-go/config" | ||||
| 	jaegerlog "github.com/uber/jaeger-client-go/log" | ||||
| 	cli "gopkg.in/urfave/cli.v1" | ||||
| ) | ||||
|  | ||||
| var Enabled bool = false | ||||
|  | ||||
| // TracingEnabledFlag is the CLI flag name to use to enable trace collections. | ||||
| const TracingEnabledFlag = "tracing" | ||||
|  | ||||
| var ( | ||||
| 	Closer io.Closer | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	TracingFlag = cli.BoolFlag{ | ||||
| 		Name:  TracingEnabledFlag, | ||||
| 		Usage: "Enable tracing", | ||||
| 	} | ||||
| 	TracingEndpointFlag = cli.StringFlag{ | ||||
| 		Name:  "tracing.endpoint", | ||||
| 		Usage: "Tracing endpoint", | ||||
| 		Value: "0.0.0.0:6831", | ||||
| 	} | ||||
| 	TracingSvcFlag = cli.StringFlag{ | ||||
| 		Name:  "tracing.svc", | ||||
| 		Usage: "Tracing service name", | ||||
| 		Value: "swarm", | ||||
| 	} | ||||
| ) | ||||
|  | ||||
| // Flags holds all command-line flags required for tracing collection. | ||||
| var Flags = []cli.Flag{ | ||||
| 	TracingFlag, | ||||
| 	TracingEndpointFlag, | ||||
| 	TracingSvcFlag, | ||||
| } | ||||
|  | ||||
| // init enables or disables the open tracing system based on the CLI arguments. | ||||
| func init() { | ||||
| 	for _, arg := range os.Args { | ||||
| 		if flag := strings.TrimLeft(arg, "-"); flag == TracingEnabledFlag { | ||||
| 			Enabled = true | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func Setup(ctx *cli.Context) { | ||||
| 	if Enabled { | ||||
| 		log.Info("Enabling opentracing") | ||||
| 		var ( | ||||
| 			endpoint = ctx.GlobalString(TracingEndpointFlag.Name) | ||||
| 			svc      = ctx.GlobalString(TracingSvcFlag.Name) | ||||
| 		) | ||||
|  | ||||
| 		Closer = initTracer(endpoint, svc) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func initTracer(endpoint, svc string) (closer io.Closer) { | ||||
| 	// Sample configuration for testing. Use constant sampling to sample every trace | ||||
| 	// and enable LogSpan to log every span via configured Logger. | ||||
| 	cfg := jaegercfg.Configuration{ | ||||
| 		Sampler: &jaegercfg.SamplerConfig{ | ||||
| 			Type:  jaeger.SamplerTypeConst, | ||||
| 			Param: 1, | ||||
| 		}, | ||||
| 		Reporter: &jaegercfg.ReporterConfig{ | ||||
| 			LogSpans:            true, | ||||
| 			BufferFlushInterval: 1 * time.Second, | ||||
| 			LocalAgentHostPort:  endpoint, | ||||
| 		}, | ||||
| 	} | ||||
|  | ||||
| 	// Example logger and metrics factory. Use github.com/uber/jaeger-client-go/log | ||||
| 	// and github.com/uber/jaeger-lib/metrics respectively to bind to real logging and metrics | ||||
| 	// frameworks. | ||||
| 	jLogger := jaegerlog.StdLogger | ||||
| 	//jMetricsFactory := metrics.NullFactory | ||||
|  | ||||
| 	// Initialize tracer with a logger and a metrics factory | ||||
| 	closer, err := cfg.InitGlobalTracer( | ||||
| 		svc, | ||||
| 		jaegercfg.Logger(jLogger), | ||||
| 		//jaegercfg.Metrics(jMetricsFactory), | ||||
| 		//jaegercfg.Observer(rpcmetrics.NewObserver(jMetricsFactory, rpcmetrics.DefaultNameNormalizer)), | ||||
| 	) | ||||
| 	if err != nil { | ||||
| 		log.Error("Could not initialize Jaeger tracer", "err", err) | ||||
| 	} | ||||
|  | ||||
| 	return closer | ||||
| } | ||||
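With the flags and Setup above, tracing stays strictly opt-in: an operator enables it with something like `swarm --tracing --tracing.endpoint 127.0.0.1:6831 --tracing.svc swarm-dev`, where the endpoint and service name are examples and the flag defaults are 0.0.0.0:6831 and "swarm". Misconfiguration is not fatal: initTracer only logs the error and may return a nil closer, which is why Swarm.Stop checks tracerClose for nil before closing it.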
vendor/github.com/codahale/hdrhistogram/LICENSE (generated, vendored, new file, 21 lines)
							| @@ -0,0 +1,21 @@ | ||||
| The MIT License (MIT) | ||||
|  | ||||
| Copyright (c) 2014 Coda Hale | ||||
|  | ||||
| Permission is hereby granted, free of charge, to any person obtaining a copy | ||||
| of this software and associated documentation files (the "Software"), to deal | ||||
| in the Software without restriction, including without limitation the rights | ||||
| to use, copy, modify, merge, publish, distribute, sublicense, and/or sell | ||||
| copies of the Software, and to permit persons to whom the Software is | ||||
| furnished to do so, subject to the following conditions: | ||||
|  | ||||
| The above copyright notice and this permission notice shall be included in | ||||
| all copies or substantial portions of the Software. | ||||
|  | ||||
| THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | ||||
| IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | ||||
| FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE | ||||
| AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | ||||
| LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, | ||||
| OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN | ||||
| THE SOFTWARE. | ||||
vendor/github.com/codahale/hdrhistogram/README.md (generated, vendored, new file, 15 lines)
							| @@ -0,0 +1,15 @@ | ||||
| hdrhistogram | ||||
| ============ | ||||
|  | ||||
| [Build Status](https://travis-ci.org/codahale/hdrhistogram) | ||||
|  | ||||
| A pure Go implementation of the [HDR Histogram](https://github.com/HdrHistogram/HdrHistogram). | ||||
|  | ||||
| > A Histogram that supports recording and analyzing sampled data value counts | ||||
| > across a configurable integer value range with configurable value precision | ||||
| > within the range. Value precision is expressed as the number of significant | ||||
| > digits in the value recording, and provides control over value quantization | ||||
| > behavior across the value range and the subsequent value resolution at any | ||||
| > given level. | ||||
|  | ||||
| For documentation, check [godoc](http://godoc.org/github.com/codahale/hdrhistogram). | ||||
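The API vendored below is compact. A typical use, with arbitrary example values and not taken from this repository, looks roughly like this:

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Track latencies from 1µs to 30s (stored as microseconds) with
	// 3 significant figures of precision.
	h := hdrhistogram.New(1, 30000000, 3)

	// Record a handful of fake request latencies.
	for _, us := range []int64{120, 450, 900, 1500, 250000} {
		if err := h.RecordValue(us); err != nil {
			fmt.Println("value out of range:", err) // too large to be recorded
		}
	}

	fmt.Println("p50:", h.ValueAtQuantile(50))
	fmt.Println("p99:", h.ValueAtQuantile(99))
	fmt.Println("mean:", h.Mean(), "max:", h.Max())
}
```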
vendor/github.com/codahale/hdrhistogram/hdr.go (generated, vendored, new file, 564 lines)
							| @@ -0,0 +1,564 @@ | ||||
| // Package hdrhistogram provides an implementation of Gil Tene's HDR Histogram | ||||
| // data structure. The HDR Histogram allows for fast and accurate analysis of | ||||
| // the extreme ranges of data with non-normal distributions, like latency. | ||||
| package hdrhistogram | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| ) | ||||
|  | ||||
| // A Bracket is a part of a cumulative distribution. | ||||
| type Bracket struct { | ||||
| 	Quantile       float64 | ||||
| 	Count, ValueAt int64 | ||||
| } | ||||
|  | ||||
| // A Snapshot is an exported view of a Histogram, useful for serializing them. | ||||
| // A Histogram can be constructed from it by passing it to Import. | ||||
| type Snapshot struct { | ||||
| 	LowestTrackableValue  int64 | ||||
| 	HighestTrackableValue int64 | ||||
| 	SignificantFigures    int64 | ||||
| 	Counts                []int64 | ||||
| } | ||||
|  | ||||
| // A Histogram is a lossy data structure used to record the distribution of | ||||
| // non-normally distributed data (like latency) with a high degree of accuracy | ||||
| // and a bounded degree of precision. | ||||
| type Histogram struct { | ||||
| 	lowestTrackableValue        int64 | ||||
| 	highestTrackableValue       int64 | ||||
| 	unitMagnitude               int64 | ||||
| 	significantFigures          int64 | ||||
| 	subBucketHalfCountMagnitude int32 | ||||
| 	subBucketHalfCount          int32 | ||||
| 	subBucketMask               int64 | ||||
| 	subBucketCount              int32 | ||||
| 	bucketCount                 int32 | ||||
| 	countsLen                   int32 | ||||
| 	totalCount                  int64 | ||||
| 	counts                      []int64 | ||||
| } | ||||
|  | ||||
| // New returns a new Histogram instance capable of tracking values in the given | ||||
| // range and with the given amount of precision. | ||||
| func New(minValue, maxValue int64, sigfigs int) *Histogram { | ||||
| 	if sigfigs < 1 || 5 < sigfigs { | ||||
| 		panic(fmt.Errorf("sigfigs must be [1,5] (was %d)", sigfigs)) | ||||
| 	} | ||||
|  | ||||
| 	largestValueWithSingleUnitResolution := 2 * math.Pow10(sigfigs) | ||||
| 	subBucketCountMagnitude := int32(math.Ceil(math.Log2(float64(largestValueWithSingleUnitResolution)))) | ||||
|  | ||||
| 	subBucketHalfCountMagnitude := subBucketCountMagnitude | ||||
| 	if subBucketHalfCountMagnitude < 1 { | ||||
| 		subBucketHalfCountMagnitude = 1 | ||||
| 	} | ||||
| 	subBucketHalfCountMagnitude-- | ||||
|  | ||||
| 	unitMagnitude := int32(math.Floor(math.Log2(float64(minValue)))) | ||||
| 	if unitMagnitude < 0 { | ||||
| 		unitMagnitude = 0 | ||||
| 	} | ||||
|  | ||||
| 	subBucketCount := int32(math.Pow(2, float64(subBucketHalfCountMagnitude)+1)) | ||||
|  | ||||
| 	subBucketHalfCount := subBucketCount / 2 | ||||
| 	subBucketMask := int64(subBucketCount-1) << uint(unitMagnitude) | ||||
|  | ||||
| 	// determine exponent range needed to support the trackable value with no | ||||
| 	// overflow: | ||||
| 	smallestUntrackableValue := int64(subBucketCount) << uint(unitMagnitude) | ||||
| 	bucketsNeeded := int32(1) | ||||
| 	for smallestUntrackableValue < maxValue { | ||||
| 		smallestUntrackableValue <<= 1 | ||||
| 		bucketsNeeded++ | ||||
| 	} | ||||
|  | ||||
| 	bucketCount := bucketsNeeded | ||||
| 	countsLen := (bucketCount + 1) * (subBucketCount / 2) | ||||
|  | ||||
| 	return &Histogram{ | ||||
| 		lowestTrackableValue:        minValue, | ||||
| 		highestTrackableValue:       maxValue, | ||||
| 		unitMagnitude:               int64(unitMagnitude), | ||||
| 		significantFigures:          int64(sigfigs), | ||||
| 		subBucketHalfCountMagnitude: subBucketHalfCountMagnitude, | ||||
| 		subBucketHalfCount:          subBucketHalfCount, | ||||
| 		subBucketMask:               subBucketMask, | ||||
| 		subBucketCount:              subBucketCount, | ||||
| 		bucketCount:                 bucketCount, | ||||
| 		countsLen:                   countsLen, | ||||
| 		totalCount:                  0, | ||||
| 		counts:                      make([]int64, countsLen), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ByteSize returns an estimate of the amount of memory allocated to the | ||||
| // histogram in bytes. | ||||
| // | ||||
| // N.B.: This does not take into account the overhead for slices, which are | ||||
| // small, constant, and specific to the compiler version. | ||||
| func (h *Histogram) ByteSize() int { | ||||
| 	return 6*8 + 5*4 + len(h.counts)*8 | ||||
| } | ||||
|  | ||||
| // Merge merges the data stored in the given histogram with the receiver, | ||||
| // returning the number of recorded values which had to be dropped. | ||||
| func (h *Histogram) Merge(from *Histogram) (dropped int64) { | ||||
| 	i := from.rIterator() | ||||
| 	for i.next() { | ||||
| 		v := i.valueFromIdx | ||||
| 		c := i.countAtIdx | ||||
|  | ||||
| 		if h.RecordValues(v, c) != nil { | ||||
| 			dropped += c | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return | ||||
| } | ||||
|  | ||||
| // TotalCount returns total number of values recorded. | ||||
| func (h *Histogram) TotalCount() int64 { | ||||
| 	return h.totalCount | ||||
| } | ||||
|  | ||||
| // Max returns the approximate maximum recorded value. | ||||
| func (h *Histogram) Max() int64 { | ||||
| 	var max int64 | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		if i.countAtIdx != 0 { | ||||
| 			max = i.highestEquivalentValue | ||||
| 		} | ||||
| 	} | ||||
| 	return h.highestEquivalentValue(max) | ||||
| } | ||||
|  | ||||
| // Min returns the approximate minimum recorded value. | ||||
| func (h *Histogram) Min() int64 { | ||||
| 	var min int64 | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		if i.countAtIdx != 0 && min == 0 { | ||||
| 			min = i.highestEquivalentValue | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| 	return h.lowestEquivalentValue(min) | ||||
| } | ||||
|  | ||||
| // Mean returns the approximate arithmetic mean of the recorded values. | ||||
| func (h *Histogram) Mean() float64 { | ||||
| 	if h.totalCount == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
| 	var total int64 | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		if i.countAtIdx != 0 { | ||||
| 			total += i.countAtIdx * h.medianEquivalentValue(i.valueFromIdx) | ||||
| 		} | ||||
| 	} | ||||
| 	return float64(total) / float64(h.totalCount) | ||||
| } | ||||
|  | ||||
| // StdDev returns the approximate standard deviation of the recorded values. | ||||
| func (h *Histogram) StdDev() float64 { | ||||
| 	if h.totalCount == 0 { | ||||
| 		return 0 | ||||
| 	} | ||||
|  | ||||
| 	mean := h.Mean() | ||||
| 	geometricDevTotal := 0.0 | ||||
|  | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		if i.countAtIdx != 0 { | ||||
| 			dev := float64(h.medianEquivalentValue(i.valueFromIdx)) - mean | ||||
| 			geometricDevTotal += (dev * dev) * float64(i.countAtIdx) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return math.Sqrt(geometricDevTotal / float64(h.totalCount)) | ||||
| } | ||||
|  | ||||
| // Reset deletes all recorded values and restores the histogram to its original | ||||
| // state. | ||||
| func (h *Histogram) Reset() { | ||||
| 	h.totalCount = 0 | ||||
| 	for i := range h.counts { | ||||
| 		h.counts[i] = 0 | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RecordValue records the given value, returning an error if the value is out | ||||
| // of range. | ||||
| func (h *Histogram) RecordValue(v int64) error { | ||||
| 	return h.RecordValues(v, 1) | ||||
| } | ||||
|  | ||||
| // RecordCorrectedValue records the given value, correcting for stalls in the | ||||
| // recording process. This only works for processes which are recording values | ||||
| // at an expected interval (e.g., doing jitter analysis). Processes which are | ||||
| // recording ad-hoc values (e.g., latency for incoming requests) can't take | ||||
| // advantage of this. | ||||
| func (h *Histogram) RecordCorrectedValue(v, expectedInterval int64) error { | ||||
| 	if err := h.RecordValue(v); err != nil { | ||||
| 		return err | ||||
| 	} | ||||
|  | ||||
| 	if expectedInterval <= 0 || v <= expectedInterval { | ||||
| 		return nil | ||||
| 	} | ||||
|  | ||||
| 	missingValue := v - expectedInterval | ||||
| 	for missingValue >= expectedInterval { | ||||
| 		if err := h.RecordValue(missingValue); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 		missingValue -= expectedInterval | ||||
| 	} | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // RecordValues records n occurrences of the given value, returning an error if | ||||
| // the value is out of range. | ||||
| func (h *Histogram) RecordValues(v, n int64) error { | ||||
| 	idx := h.countsIndexFor(v) | ||||
| 	if idx < 0 || int(h.countsLen) <= idx { | ||||
| 		return fmt.Errorf("value %d is too large to be recorded", v) | ||||
| 	} | ||||
| 	h.counts[idx] += n | ||||
| 	h.totalCount += n | ||||
|  | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // ValueAtQuantile returns the recorded value at the given quantile (0..100). | ||||
| func (h *Histogram) ValueAtQuantile(q float64) int64 { | ||||
| 	if q > 100 { | ||||
| 		q = 100 | ||||
| 	} | ||||
|  | ||||
| 	total := int64(0) | ||||
| 	countAtPercentile := int64(((q / 100) * float64(h.totalCount)) + 0.5) | ||||
|  | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		total += i.countAtIdx | ||||
| 		if total >= countAtPercentile { | ||||
| 			return h.highestEquivalentValue(i.valueFromIdx) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return 0 | ||||
| } | ||||
|  | ||||
| // CumulativeDistribution returns an ordered list of brackets of the | ||||
| // distribution of recorded values. | ||||
| func (h *Histogram) CumulativeDistribution() []Bracket { | ||||
| 	var result []Bracket | ||||
|  | ||||
| 	i := h.pIterator(1) | ||||
| 	for i.next() { | ||||
| 		result = append(result, Bracket{ | ||||
| 			Quantile: i.percentile, | ||||
| 			Count:    i.countToIdx, | ||||
| 			ValueAt:  i.highestEquivalentValue, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // SignificantFigures returns the significant figures used to create the | ||||
| // histogram | ||||
| func (h *Histogram) SignificantFigures() int64 { | ||||
| 	return h.significantFigures | ||||
| } | ||||
|  | ||||
| // LowestTrackableValue returns the lower bound on values that will be added | ||||
| // to the histogram | ||||
| func (h *Histogram) LowestTrackableValue() int64 { | ||||
| 	return h.lowestTrackableValue | ||||
| } | ||||
|  | ||||
| // HighestTrackableValue returns the upper bound on values that will be added | ||||
| // to the histogram | ||||
| func (h *Histogram) HighestTrackableValue() int64 { | ||||
| 	return h.highestTrackableValue | ||||
| } | ||||
|  | ||||
| // Histogram bar for plotting | ||||
| type Bar struct { | ||||
| 	From, To, Count int64 | ||||
| } | ||||
|  | ||||
| // Pretty print as csv for easy plotting | ||||
| func (b Bar) String() string { | ||||
| 	return fmt.Sprintf("%v, %v, %v\n", b.From, b.To, b.Count) | ||||
| } | ||||
|  | ||||
| // Distribution returns an ordered list of bars of the | ||||
| // distribution of recorded values; counts can be normalized to a probability. | ||||
| func (h *Histogram) Distribution() (result []Bar) { | ||||
| 	i := h.iterator() | ||||
| 	for i.next() { | ||||
| 		result = append(result, Bar{ | ||||
| 			Count: i.countAtIdx, | ||||
| 			From:  h.lowestEquivalentValue(i.valueFromIdx), | ||||
| 			To:    i.highestEquivalentValue, | ||||
| 		}) | ||||
| 	} | ||||
|  | ||||
| 	return result | ||||
| } | ||||
|  | ||||
| // Equals returns true if the two Histograms are equivalent, false if not. | ||||
| func (h *Histogram) Equals(other *Histogram) bool { | ||||
| 	switch { | ||||
| 	case | ||||
| 		h.lowestTrackableValue != other.lowestTrackableValue, | ||||
| 		h.highestTrackableValue != other.highestTrackableValue, | ||||
| 		h.unitMagnitude != other.unitMagnitude, | ||||
| 		h.significantFigures != other.significantFigures, | ||||
| 		h.subBucketHalfCountMagnitude != other.subBucketHalfCountMagnitude, | ||||
| 		h.subBucketHalfCount != other.subBucketHalfCount, | ||||
| 		h.subBucketMask != other.subBucketMask, | ||||
| 		h.subBucketCount != other.subBucketCount, | ||||
| 		h.bucketCount != other.bucketCount, | ||||
| 		h.countsLen != other.countsLen, | ||||
| 		h.totalCount != other.totalCount: | ||||
| 		return false | ||||
| 	default: | ||||
| 		for i, c := range h.counts { | ||||
| 			if c != other.counts[i] { | ||||
| 				return false | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| // Export returns a snapshot view of the Histogram. This can be later passed to | ||||
| // Import to construct a new Histogram with the same state. | ||||
| func (h *Histogram) Export() *Snapshot { | ||||
| 	return &Snapshot{ | ||||
| 		LowestTrackableValue:  h.lowestTrackableValue, | ||||
| 		HighestTrackableValue: h.highestTrackableValue, | ||||
| 		SignificantFigures:    h.significantFigures, | ||||
| 		Counts:                append([]int64(nil), h.counts...), // copy | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Import returns a new Histogram populated from the Snapshot data (which the | ||||
| // caller must stop accessing). | ||||
| func Import(s *Snapshot) *Histogram { | ||||
| 	h := New(s.LowestTrackableValue, s.HighestTrackableValue, int(s.SignificantFigures)) | ||||
| 	h.counts = s.Counts | ||||
| 	totalCount := int64(0) | ||||
| 	for i := int32(0); i < h.countsLen; i++ { | ||||
| 		countAtIndex := h.counts[i] | ||||
| 		if countAtIndex > 0 { | ||||
| 			totalCount += countAtIndex | ||||
| 		} | ||||
| 	} | ||||
| 	h.totalCount = totalCount | ||||
| 	return h | ||||
| } | ||||
|  | ||||
| func (h *Histogram) iterator() *iterator { | ||||
| 	return &iterator{ | ||||
| 		h:            h, | ||||
| 		subBucketIdx: -1, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (h *Histogram) rIterator() *rIterator { | ||||
| 	return &rIterator{ | ||||
| 		iterator: iterator{ | ||||
| 			h:            h, | ||||
| 			subBucketIdx: -1, | ||||
| 		}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (h *Histogram) pIterator(ticksPerHalfDistance int32) *pIterator { | ||||
| 	return &pIterator{ | ||||
| 		iterator: iterator{ | ||||
| 			h:            h, | ||||
| 			subBucketIdx: -1, | ||||
| 		}, | ||||
| 		ticksPerHalfDistance: ticksPerHalfDistance, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (h *Histogram) sizeOfEquivalentValueRange(v int64) int64 { | ||||
| 	bucketIdx := h.getBucketIndex(v) | ||||
| 	subBucketIdx := h.getSubBucketIdx(v, bucketIdx) | ||||
| 	adjustedBucket := bucketIdx | ||||
| 	if subBucketIdx >= h.subBucketCount { | ||||
| 		adjustedBucket++ | ||||
| 	} | ||||
| 	return int64(1) << uint(h.unitMagnitude+int64(adjustedBucket)) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) valueFromIndex(bucketIdx, subBucketIdx int32) int64 { | ||||
| 	return int64(subBucketIdx) << uint(int64(bucketIdx)+h.unitMagnitude) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) lowestEquivalentValue(v int64) int64 { | ||||
| 	bucketIdx := h.getBucketIndex(v) | ||||
| 	subBucketIdx := h.getSubBucketIdx(v, bucketIdx) | ||||
| 	return h.valueFromIndex(bucketIdx, subBucketIdx) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) nextNonEquivalentValue(v int64) int64 { | ||||
| 	return h.lowestEquivalentValue(v) + h.sizeOfEquivalentValueRange(v) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) highestEquivalentValue(v int64) int64 { | ||||
| 	return h.nextNonEquivalentValue(v) - 1 | ||||
| } | ||||
|  | ||||
| func (h *Histogram) medianEquivalentValue(v int64) int64 { | ||||
| 	return h.lowestEquivalentValue(v) + (h.sizeOfEquivalentValueRange(v) >> 1) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) getCountAtIndex(bucketIdx, subBucketIdx int32) int64 { | ||||
| 	return h.counts[h.countsIndex(bucketIdx, subBucketIdx)] | ||||
| } | ||||
|  | ||||
| func (h *Histogram) countsIndex(bucketIdx, subBucketIdx int32) int32 { | ||||
| 	bucketBaseIdx := (bucketIdx + 1) << uint(h.subBucketHalfCountMagnitude) | ||||
| 	offsetInBucket := subBucketIdx - h.subBucketHalfCount | ||||
| 	return bucketBaseIdx + offsetInBucket | ||||
| } | ||||
|  | ||||
| func (h *Histogram) getBucketIndex(v int64) int32 { | ||||
| 	pow2Ceiling := bitLen(v | h.subBucketMask) | ||||
| 	return int32(pow2Ceiling - int64(h.unitMagnitude) - | ||||
| 		int64(h.subBucketHalfCountMagnitude+1)) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) getSubBucketIdx(v int64, idx int32) int32 { | ||||
| 	return int32(v >> uint(int64(idx)+int64(h.unitMagnitude))) | ||||
| } | ||||
|  | ||||
| func (h *Histogram) countsIndexFor(v int64) int { | ||||
| 	bucketIdx := h.getBucketIndex(v) | ||||
| 	subBucketIdx := h.getSubBucketIdx(v, bucketIdx) | ||||
| 	return int(h.countsIndex(bucketIdx, subBucketIdx)) | ||||
| } | ||||
|  | ||||
| type iterator struct { | ||||
| 	h                                    *Histogram | ||||
| 	bucketIdx, subBucketIdx              int32 | ||||
| 	countAtIdx, countToIdx, valueFromIdx int64 | ||||
| 	highestEquivalentValue               int64 | ||||
| } | ||||
|  | ||||
| func (i *iterator) next() bool { | ||||
| 	if i.countToIdx >= i.h.totalCount { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	// increment bucket | ||||
| 	i.subBucketIdx++ | ||||
| 	if i.subBucketIdx >= i.h.subBucketCount { | ||||
| 		i.subBucketIdx = i.h.subBucketHalfCount | ||||
| 		i.bucketIdx++ | ||||
| 	} | ||||
|  | ||||
| 	if i.bucketIdx >= i.h.bucketCount { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	i.countAtIdx = i.h.getCountAtIndex(i.bucketIdx, i.subBucketIdx) | ||||
| 	i.countToIdx += i.countAtIdx | ||||
| 	i.valueFromIdx = i.h.valueFromIndex(i.bucketIdx, i.subBucketIdx) | ||||
| 	i.highestEquivalentValue = i.h.highestEquivalentValue(i.valueFromIdx) | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| type rIterator struct { | ||||
| 	iterator | ||||
| 	countAddedThisStep int64 | ||||
| } | ||||
|  | ||||
| func (r *rIterator) next() bool { | ||||
| 	for r.iterator.next() { | ||||
| 		if r.countAtIdx != 0 { | ||||
| 			r.countAddedThisStep = r.countAtIdx | ||||
| 			return true | ||||
| 		} | ||||
| 	} | ||||
| 	return false | ||||
| } | ||||
|  | ||||
| type pIterator struct { | ||||
| 	iterator | ||||
| 	seenLastValue          bool | ||||
| 	ticksPerHalfDistance   int32 | ||||
| 	percentileToIteratorTo float64 | ||||
| 	percentile             float64 | ||||
| } | ||||
|  | ||||
| func (p *pIterator) next() bool { | ||||
| 	if !(p.countToIdx < p.h.totalCount) { | ||||
| 		if p.seenLastValue { | ||||
| 			return false | ||||
| 		} | ||||
|  | ||||
| 		p.seenLastValue = true | ||||
| 		p.percentile = 100 | ||||
|  | ||||
| 		return true | ||||
| 	} | ||||
|  | ||||
| 	if p.subBucketIdx == -1 && !p.iterator.next() { | ||||
| 		return false | ||||
| 	} | ||||
|  | ||||
| 	var done = false | ||||
| 	for !done { | ||||
| 		currentPercentile := (100.0 * float64(p.countToIdx)) / float64(p.h.totalCount) | ||||
| 		if p.countAtIdx != 0 && p.percentileToIteratorTo <= currentPercentile { | ||||
| 			p.percentile = p.percentileToIteratorTo | ||||
| 			halfDistance := math.Trunc(math.Pow(2, math.Trunc(math.Log2(100.0/(100.0-p.percentileToIteratorTo)))+1)) | ||||
| 			percentileReportingTicks := float64(p.ticksPerHalfDistance) * halfDistance | ||||
| 			p.percentileToIteratorTo += 100.0 / percentileReportingTicks | ||||
| 			return true | ||||
| 		} | ||||
| 		done = !p.iterator.next() | ||||
| 	} | ||||
|  | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func bitLen(x int64) (n int64) { | ||||
| 	for ; x >= 0x8000; x >>= 16 { | ||||
| 		n += 16 | ||||
| 	} | ||||
| 	if x >= 0x80 { | ||||
| 		x >>= 8 | ||||
| 		n += 8 | ||||
| 	} | ||||
| 	if x >= 0x8 { | ||||
| 		x >>= 4 | ||||
| 		n += 4 | ||||
| 	} | ||||
| 	if x >= 0x2 { | ||||
| 		x >>= 2 | ||||
| 		n += 2 | ||||
| 	} | ||||
| 	if x >= 0x1 { | ||||
| 		n++ | ||||
| 	} | ||||
| 	return | ||||
| } | ||||
vendor/github.com/codahale/hdrhistogram/window.go (generated, vendored, new file, 45 lines)
							| @@ -0,0 +1,45 @@ | ||||
| package hdrhistogram | ||||
|  | ||||
| // A WindowedHistogram combines histograms to provide windowed statistics. | ||||
| type WindowedHistogram struct { | ||||
| 	idx int | ||||
| 	h   []Histogram | ||||
| 	m   *Histogram | ||||
|  | ||||
| 	Current *Histogram | ||||
| } | ||||
|  | ||||
| // NewWindowed creates a new WindowedHistogram with N underlying histograms with | ||||
| // the given parameters. | ||||
| func NewWindowed(n int, minValue, maxValue int64, sigfigs int) *WindowedHistogram { | ||||
| 	w := WindowedHistogram{ | ||||
| 		idx: -1, | ||||
| 		h:   make([]Histogram, n), | ||||
| 		m:   New(minValue, maxValue, sigfigs), | ||||
| 	} | ||||
|  | ||||
| 	for i := range w.h { | ||||
| 		w.h[i] = *New(minValue, maxValue, sigfigs) | ||||
| 	} | ||||
| 	w.Rotate() | ||||
|  | ||||
| 	return &w | ||||
| } | ||||
|  | ||||
| // Merge returns a histogram which includes the recorded values from all the | ||||
| // sections of the window. | ||||
| func (w *WindowedHistogram) Merge() *Histogram { | ||||
| 	w.m.Reset() | ||||
| 	for _, h := range w.h { | ||||
| 		w.m.Merge(&h) | ||||
| 	} | ||||
| 	return w.m | ||||
| } | ||||
|  | ||||
| // Rotate resets the oldest histogram and rotates it to be used as the current | ||||
| // histogram. | ||||
| func (w *WindowedHistogram) Rotate() { | ||||
| 	w.idx++ | ||||
| 	w.Current = &w.h[w.idx%len(w.h)] | ||||
| 	w.Current.Reset() | ||||
| } | ||||
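For completeness, a rough sketch of driving the windowed variant (bounds and tick counts are arbitrary, not taken from this repository): record into Current, Rotate at each interval boundary, and Merge to query the sliding window.

```go
package main

import (
	"fmt"

	"github.com/codahale/hdrhistogram"
)

func main() {
	// Five windows of values in [1, 1e6] with 3 significant figures.
	w := hdrhistogram.NewWindowed(5, 1, 1000000, 3)

	for tick := 1; tick <= 7; tick++ {
		// Record into the live window for this interval.
		_ = w.Current.RecordValue(int64(100 * tick))
		// At the interval boundary, recycle the oldest window as the new
		// Current; its old contents drop out of the merged view.
		w.Rotate()
	}

	merged := w.Merge()
	fmt.Println("values in window:", merged.TotalCount(), "max:", merged.Max())
}
```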
vendor/github.com/opentracing/opentracing-go/CHANGELOG.md (generated, vendored, new file, 14 lines)
							| @@ -0,0 +1,14 @@ | ||||
| Changes by Version | ||||
| ================== | ||||
|  | ||||
| 1.1.0 (unreleased) | ||||
| ------------------- | ||||
|  | ||||
| - Deprecate InitGlobalTracer() in favor of SetGlobalTracer() | ||||
|  | ||||
|  | ||||
| 1.0.0 (2016-09-26) | ||||
| ------------------- | ||||
|  | ||||
| - This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec) | ||||
|  | ||||
vendor/github.com/opentracing/opentracing-go/LICENSE (generated, vendored, new file, 201 lines)
							| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "{}" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright 2016 The OpenTracing Authors | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
vendor/github.com/opentracing/opentracing-go/Makefile (generated, vendored, new file, 32 lines)
							| @@ -0,0 +1,32 @@ | ||||
| PACKAGES := . ./mocktracer/... ./ext/... | ||||
|  | ||||
| .DEFAULT_GOAL := test-and-lint | ||||
|  | ||||
| .PHONY: test-and-lint | ||||
|  | ||||
| test-and-lint: test lint | ||||
|  | ||||
| .PHONY: test | ||||
| test: | ||||
| 	go test -v -cover -race ./... | ||||
|  | ||||
| cover: | ||||
| 	@rm -rf cover-all.out | ||||
| 	$(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;) | ||||
| 	@grep mode: cover.out > coverage.out | ||||
| 	@cat cover-all.out >> coverage.out | ||||
| 	go tool cover -html=coverage.out -o cover.html | ||||
| 	@rm -rf cover.out cover-all.out coverage.out | ||||
|  | ||||
| cover-pkg: | ||||
| 	go test -coverprofile cover.out $(PKG) | ||||
| 	@grep -v mode: cover.out >> cover-all.out | ||||
|  | ||||
| .PHONY: lint | ||||
| lint: | ||||
| 	go fmt ./... | ||||
| 	golint ./... | ||||
| 	@# Run again with magic to exit non-zero if golint outputs anything. | ||||
| 	@! (golint ./... | read dummy) | ||||
| 	go vet ./... | ||||
|  | ||||
vendor/github.com/opentracing/opentracing-go/README.md (generated, vendored, new file, 171 lines)
							| @@ -0,0 +1,171 @@ | ||||
| [Gitter](https://gitter.im/opentracing/public) [Build Status](https://travis-ci.org/opentracing/opentracing-go) [GoDoc](http://godoc.org/github.com/opentracing/opentracing-go) | ||||
| [Sourcegraph](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) | ||||
|  | ||||
| # OpenTracing API for Go | ||||
|  | ||||
| This package is a Go platform API for OpenTracing. | ||||
|  | ||||
| ## Required Reading | ||||
|  | ||||
| In order to understand the Go platform API, one must first be familiar with the | ||||
| [OpenTracing project](http://opentracing.io) and, more specifically, its | ||||
| [terminology](http://opentracing.io/documentation/pages/spec.html). | ||||
|  | ||||
| ## API overview for those adding instrumentation | ||||
|  | ||||
| Everyday consumers of this `opentracing` package really only need to worry | ||||
| about a couple of key abstractions: the `StartSpan` function, the `Span` | ||||
| interface, and binding a `Tracer` at `main()`-time. Here are code snippets | ||||
| demonstrating some important use cases. | ||||
|  | ||||
| #### Singleton initialization | ||||
|  | ||||
| The simplest starting point is `./default_tracer.go`. As early as possible, call | ||||
|  | ||||
| ```go | ||||
|     import "github.com/opentracing/opentracing-go" | ||||
|     import ".../some_tracing_impl" | ||||
|  | ||||
|     func main() { | ||||
|         opentracing.SetGlobalTracer( | ||||
|             // tracing impl specific: | ||||
|             some_tracing_impl.New(...), | ||||
|         ) | ||||
|         ... | ||||
|     } | ||||
| ``` | ||||
|  | ||||
| #### Non-Singleton initialization | ||||
|  | ||||
| If you prefer direct control to singletons, manage ownership of the | ||||
| `opentracing.Tracer` implementation explicitly. | ||||
|  | ||||
| #### Creating a Span given an existing Go `context.Context` | ||||
|  | ||||
| If you use `context.Context` in your application, OpenTracing's Go library will | ||||
| happily rely on it for `Span` propagation. To start a new (blocking child) | ||||
| `Span`, you can use `StartSpanFromContext`. | ||||
|  | ||||
| ```go | ||||
|     func xyz(ctx context.Context, ...) { | ||||
|         ... | ||||
|         span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") | ||||
|         defer span.Finish() | ||||
|         span.LogFields( | ||||
|             log.String("event", "soft error"), | ||||
|             log.String("type", "cache timeout"), | ||||
|             log.Int("waited.millis", 1500)) | ||||
|         ... | ||||
|     } | ||||
| ``` | ||||
|  | ||||
| #### Starting an empty trace by creating a "root span" | ||||
|  | ||||
| It's always possible to create a "root" `Span` with no parent or other causal | ||||
| reference. | ||||
|  | ||||
| ```go | ||||
|     func xyz() { | ||||
|         ... | ||||
|         sp := opentracing.StartSpan("operation_name") | ||||
|         defer sp.Finish() | ||||
|         ... | ||||
|     } | ||||
| ``` | ||||
|  | ||||
| #### Creating a (child) Span given an existing (parent) Span | ||||
|  | ||||
| ```go | ||||
|     func xyz(parentSpan opentracing.Span, ...) { | ||||
|         ... | ||||
|         sp := opentracing.StartSpan( | ||||
|             "operation_name", | ||||
|             opentracing.ChildOf(parentSpan.Context())) | ||||
|         defer sp.Finish() | ||||
|         ... | ||||
|     } | ||||
| ``` | ||||
|  | ||||
| #### Serializing to the wire | ||||
|  | ||||
| ```go | ||||
|     func makeSomeRequest(ctx context.Context) ... { | ||||
|         if span := opentracing.SpanFromContext(ctx); span != nil { | ||||
|             httpClient := &http.Client{} | ||||
|             httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) | ||||
|  | ||||
|             // Transmit the span's TraceContext as HTTP headers on our | ||||
|             // outbound request. | ||||
|             opentracing.GlobalTracer().Inject( | ||||
|                 span.Context(), | ||||
|                 opentracing.HTTPHeaders, | ||||
|                 opentracing.HTTPHeadersCarrier(httpReq.Header)) | ||||
|  | ||||
|             resp, err := httpClient.Do(httpReq) | ||||
|             ... | ||||
|         } | ||||
|         ... | ||||
|     } | ||||
| ``` | ||||
|  | ||||
| #### Deserializing from the wire | ||||
|  | ||||
| ```go | ||||
|     http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { | ||||
|         var serverSpan opentracing.Span | ||||
|         appSpecificOperationName := ... | ||||
|         wireContext, err := opentracing.GlobalTracer().Extract( | ||||
|             opentracing.HTTPHeaders, | ||||
|             opentracing.HTTPHeadersCarrier(req.Header)) | ||||
|         if err != nil { | ||||
|             // Optionally record something about err here | ||||
|         } | ||||
|  | ||||
|         // Create the span referring to the RPC client if available. | ||||
|         // If wireContext == nil, a root span will be created. | ||||
|         serverSpan = opentracing.StartSpan( | ||||
|             appSpecificOperationName, | ||||
|             ext.RPCServerOption(wireContext)) | ||||
|  | ||||
|         defer serverSpan.Finish() | ||||
|  | ||||
|         ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) | ||||
|         ... | ||||
|     }) | ||||
| ``` | ||||
|  | ||||
| #### Conditionally capture a field using `log.Noop` | ||||
|  | ||||
| In some situations, you may want to dynamically decide whether or not | ||||
| to log a field.  For example, you may want to capture additional data, | ||||
| such as a customer ID, in non-production environments: | ||||
|  | ||||
| ```go | ||||
|     func Customer(order *Order) log.Field { | ||||
|         if os.Getenv("ENVIRONMENT") == "dev" { | ||||
|             return log.String("customer", order.Customer.ID) | ||||
|         } | ||||
|         return log.Noop() | ||||
|     } | ||||
| ``` | ||||
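|  | ||||
| The resulting field can then be passed to `LogFields` alongside ordinary | ||||
| fields (a sketch, assuming `span` and `order` are in scope): | ||||
|  | ||||
| ```go | ||||
|     span.LogFields(log.String("event", "purchase"), Customer(order)) | ||||
| ``` | ||||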
|  | ||||
| #### Goroutine-safety | ||||
|  | ||||
| The entire public API is goroutine-safe and does not require external | ||||
| synchronization. | ||||
|  | ||||
| ## API pointers for those implementing a tracing system | ||||
|  | ||||
| Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. | ||||
|  | ||||
| ## API compatibility | ||||
|  | ||||
| For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. | ||||
|  | ||||
| ## Tracer test suite | ||||
|  | ||||
| A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package to help Tracer implementors verify that their Tracer works correctly. | ||||
|  | ||||
| ## Licensing | ||||
|  | ||||
| [Apache 2.0 License](./LICENSE). | ||||

210 vendor/github.com/opentracing/opentracing-go/ext/tags.go generated vendored Normal file
							| @@ -0,0 +1,210 @@ | ||||
| package ext | ||||
|  | ||||
| import opentracing "github.com/opentracing/opentracing-go" | ||||
|  | ||||
| // These constants define common tag names recommended for better portability across | ||||
| // tracing systems and languages/platforms. | ||||
| // | ||||
| // The tag names are defined as typed strings, so that in addition to the usual use | ||||
| // | ||||
| //     span.SetTag(TagName, value) | ||||
| // | ||||
| // they also support value type validation via this additional syntax: | ||||
| // | ||||
| //    TagName.Set(span, value) | ||||
| // | ||||
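| // For example, given an active span `sp` (the values are illustrative): | ||||
| // | ||||
| //     ext.HTTPStatusCode.Set(sp, 200) | ||||
| //     ext.PeerService.Set(sp, "billing") | ||||
| // | ||||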
| var ( | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// SpanKind (client/server or producer/consumer) | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// SpanKind hints at relationship between spans, e.g. client/server | ||||
| 	SpanKind = spanKindTagName("span.kind") | ||||
|  | ||||
| 	// SpanKindRPCClient marks a span representing the client-side of an RPC | ||||
| 	// or other remote call | ||||
| 	SpanKindRPCClientEnum = SpanKindEnum("client") | ||||
| 	SpanKindRPCClient     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} | ||||
|  | ||||
| 	// SpanKindRPCServer marks a span representing the server-side of an RPC | ||||
| 	// or other remote call | ||||
| 	SpanKindRPCServerEnum = SpanKindEnum("server") | ||||
| 	SpanKindRPCServer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} | ||||
|  | ||||
| 	// SpanKindProducer marks a span representing the producer-side of a | ||||
| 	// message bus | ||||
| 	SpanKindProducerEnum = SpanKindEnum("producer") | ||||
| 	SpanKindProducer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} | ||||
|  | ||||
| 	// SpanKindConsumer marks a span representing the consumer-side of a | ||||
| 	// message bus | ||||
| 	SpanKindConsumerEnum = SpanKindEnum("consumer") | ||||
| 	SpanKindConsumer     = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// Component name | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// Component is a low-cardinality identifier of the module, library, | ||||
| 	// or package that is generating a span. | ||||
| 	Component = stringTagName("component") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// Sampling hint | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// SamplingPriority determines the priority of sampling this Span. | ||||
| 	SamplingPriority = uint16TagName("sampling.priority") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// Peer tags. These tags can be emitted by either the client side or | ||||
| 	// the server side to describe the other side/service in peer-to-peer | ||||
| 	// communications, such as an RPC call. | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// PeerService records the service name of the peer. | ||||
| 	PeerService = stringTagName("peer.service") | ||||
|  | ||||
| 	// PeerAddress records the address of the peer. This may be an "ip:port", | ||||
| 	// a bare "hostname", an FQDN, or even a database DSN substring | ||||
| 	// like "mysql://username@127.0.0.1:3306/dbname" | ||||
| 	PeerAddress = stringTagName("peer.address") | ||||
|  | ||||
| 	// PeerHostname records the host name of the peer | ||||
| 	PeerHostname = stringTagName("peer.hostname") | ||||
|  | ||||
| 	// PeerHostIPv4 records IP v4 host address of the peer | ||||
| 	PeerHostIPv4 = ipv4Tag("peer.ipv4") | ||||
|  | ||||
| 	// PeerHostIPv6 records IP v6 host address of the peer | ||||
| 	PeerHostIPv6 = stringTagName("peer.ipv6") | ||||
|  | ||||
| 	// PeerPort records port number of the peer | ||||
| 	PeerPort = uint16TagName("peer.port") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// HTTP Tags | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// HTTPUrl should be the URL of the request being handled in this segment | ||||
| 	// of the trace, in standard URI format. The protocol is optional. | ||||
| 	HTTPUrl = stringTagName("http.url") | ||||
|  | ||||
| 	// HTTPMethod is the HTTP method of the request, and is case-insensitive. | ||||
| 	HTTPMethod = stringTagName("http.method") | ||||
|  | ||||
| 	// HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the | ||||
| 	// HTTP response. | ||||
| 	HTTPStatusCode = uint16TagName("http.status_code") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// DB Tags | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// DBInstance is database instance name. | ||||
| 	DBInstance = stringTagName("db.instance") | ||||
|  | ||||
| 	// DBStatement is a database statement for the given database type. | ||||
| 	// It can be a query or a prepared statement (i.e., before substitution). | ||||
| 	DBStatement = stringTagName("db.statement") | ||||
|  | ||||
| 	// DBType is a database type. For any SQL database, "sql". | ||||
| 	// For others, the lower-case database category, e.g. "redis" | ||||
| 	DBType = stringTagName("db.type") | ||||
|  | ||||
| 	// DBUser is a username for accessing database. | ||||
| 	DBUser = stringTagName("db.user") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// Message Bus Tag | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// MessageBusDestination is an address at which messages can be exchanged | ||||
| 	MessageBusDestination = stringTagName("message_bus.destination") | ||||
|  | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
| 	// Error Tag | ||||
| 	////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| 	// Error indicates that the operation represented by the span resulted in an error. | ||||
| 	Error = boolTagName("error") | ||||
| ) | ||||
|  | ||||
| // --- | ||||
|  | ||||
| // SpanKindEnum represents common span types | ||||
| type SpanKindEnum string | ||||
|  | ||||
| type spanKindTagName string | ||||
|  | ||||
| // Set adds a string tag to the `span` | ||||
| func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| type rpcServerOption struct { | ||||
| 	clientContext opentracing.SpanContext | ||||
| } | ||||
|  | ||||
| func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { | ||||
| 	if r.clientContext != nil { | ||||
| 		opentracing.ChildOf(r.clientContext).Apply(o) | ||||
| 	} | ||||
| 	SpanKindRPCServer.Apply(o) | ||||
| } | ||||
|  | ||||
| // RPCServerOption returns a StartSpanOption appropriate for an RPC server span | ||||
| // with `client` representing the metadata for the remote peer Span if available. | ||||
| // In case client == nil, due to the client not being instrumented, this RPC | ||||
| // server span will be a root span. | ||||
| func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { | ||||
| 	return rpcServerOption{client} | ||||
| } | ||||
|  | ||||
| // --- | ||||
|  | ||||
| type stringTagName string | ||||
|  | ||||
| // Set adds a string tag to the `span` | ||||
| func (tag stringTagName) Set(span opentracing.Span, value string) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| // --- | ||||
|  | ||||
| type uint32TagName string | ||||
|  | ||||
| // Set adds a uint32 tag to the `span` | ||||
| func (tag uint32TagName) Set(span opentracing.Span, value uint32) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| // --- | ||||
|  | ||||
| type uint16TagName string | ||||
|  | ||||
| // Set adds a uint16 tag to the `span` | ||||
| func (tag uint16TagName) Set(span opentracing.Span, value uint16) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| // --- | ||||
|  | ||||
| type boolTagName string | ||||
|  | ||||
| // Set adds a bool tag to the `span` | ||||
| func (tag boolTagName) Set(span opentracing.Span, value bool) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| type ipv4Tag string | ||||
|  | ||||
| // Set adds the IPv4 host address of the peer as a uint32 value to the `span`; kept for backward and Zipkin compatibility | ||||
| func (tag ipv4Tag) Set(span opentracing.Span, value uint32) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||
|  | ||||
| // SetString records the IPv4 host address of the peer as a dot-separated string on the `span`, e.g. "127.0.0.1" | ||||
| func (tag ipv4Tag) SetString(span opentracing.Span, value string) { | ||||
| 	span.SetTag(string(tag), value) | ||||
| } | ||||

32 vendor/github.com/opentracing/opentracing-go/globaltracer.go generated vendored Normal file
							| @@ -0,0 +1,32 @@ | ||||
| package opentracing | ||||
|  | ||||
| var ( | ||||
| 	globalTracer Tracer = NoopTracer{} | ||||
| ) | ||||
|  | ||||
| // SetGlobalTracer sets the [singleton] opentracing.Tracer returned by | ||||
| // GlobalTracer(). Those who use GlobalTracer (rather than directly manage an | ||||
| // opentracing.Tracer instance) should call SetGlobalTracer as early as | ||||
| // possible in main(), prior to calling the `StartSpan` global func below. | ||||
| // Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` | ||||
| // (etc) globals are noops. | ||||
| func SetGlobalTracer(tracer Tracer) { | ||||
| 	globalTracer = tracer | ||||
| } | ||||
|  | ||||
| // GlobalTracer returns the global singleton `Tracer` implementation. | ||||
| // Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop | ||||
| // implementation that drops all data handed to it. | ||||
| func GlobalTracer() Tracer { | ||||
| 	return globalTracer | ||||
| } | ||||
|  | ||||
| // StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. | ||||
| func StartSpan(operationName string, opts ...StartSpanOption) Span { | ||||
| 	return globalTracer.StartSpan(operationName, opts...) | ||||
| } | ||||
|  | ||||
| // InitGlobalTracer is deprecated. Please use SetGlobalTracer. | ||||
| func InitGlobalTracer(tracer Tracer) { | ||||
| 	SetGlobalTracer(tracer) | ||||
| } | ||||

54 vendor/github.com/opentracing/opentracing-go/gocontext.go generated vendored Normal file
							| @@ -0,0 +1,54 @@ | ||||
| package opentracing | ||||
|  | ||||
| import "context" | ||||
|  | ||||
| type contextKey struct{} | ||||
|  | ||||
| var activeSpanKey = contextKey{} | ||||
|  | ||||
| // ContextWithSpan returns a new `context.Context` that holds a reference to | ||||
| // the given `span`. | ||||
| func ContextWithSpan(ctx context.Context, span Span) context.Context { | ||||
| 	return context.WithValue(ctx, activeSpanKey, span) | ||||
| } | ||||
|  | ||||
| // SpanFromContext returns the `Span` previously associated with `ctx`, or | ||||
| // `nil` if no such `Span` could be found. | ||||
| // | ||||
| // NOTE: context.Context != SpanContext: the former is Go's intra-process | ||||
| // context propagation mechanism, and the latter houses OpenTracing's per-Span | ||||
| // identity and baggage information. | ||||
| func SpanFromContext(ctx context.Context) Span { | ||||
| 	val := ctx.Value(activeSpanKey) | ||||
| 	if sp, ok := val.(Span); ok { | ||||
| 		return sp | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // StartSpanFromContext starts and returns a Span with `operationName`, using | ||||
| // any Span found within `ctx` as a ChildOfRef. If no such parent could be | ||||
| // found, StartSpanFromContext creates a root (parentless) Span. | ||||
| // | ||||
| // The second return value is a context.Context object built around the | ||||
| // returned Span. | ||||
| // | ||||
| // Example usage: | ||||
| // | ||||
| //    SomeFunction(ctx context.Context, ...) { | ||||
| //        sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") | ||||
| //        defer sp.Finish() | ||||
| //        ... | ||||
| //    } | ||||
| func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { | ||||
| 	return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) | ||||
| } | ||||
|  | ||||
| // startSpanFromContextWithTracer is factored out for testing purposes. | ||||
| func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { | ||||
| 	if parentSpan := SpanFromContext(ctx); parentSpan != nil { | ||||
| 		opts = append(opts, ChildOf(parentSpan.Context())) | ||||
| 	} | ||||
| 	span := tracer.StartSpan(operationName, opts...) | ||||
| 	return span, ContextWithSpan(ctx, span) | ||||
| } | ||||

269 vendor/github.com/opentracing/opentracing-go/log/field.go generated vendored Normal file
							| @@ -0,0 +1,269 @@ | ||||
| package log | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"math" | ||||
| ) | ||||
|  | ||||
| type fieldType int | ||||
|  | ||||
| const ( | ||||
| 	stringType fieldType = iota | ||||
| 	boolType | ||||
| 	intType | ||||
| 	int32Type | ||||
| 	uint32Type | ||||
| 	int64Type | ||||
| 	uint64Type | ||||
| 	float32Type | ||||
| 	float64Type | ||||
| 	errorType | ||||
| 	objectType | ||||
| 	lazyLoggerType | ||||
| 	noopType | ||||
| ) | ||||
|  | ||||
| // Field instances are constructed via Bool, String, and so on. | ||||
| // Tracing implementations may then handle them via the Field.Marshal | ||||
| // method. | ||||
| // | ||||
| // "heavily influenced by" (i.e., partially stolen from) | ||||
| // https://github.com/uber-go/zap | ||||
| type Field struct { | ||||
| 	key          string | ||||
| 	fieldType    fieldType | ||||
| 	numericVal   int64 | ||||
| 	stringVal    string | ||||
| 	interfaceVal interface{} | ||||
| } | ||||
|  | ||||
| // String adds a string-valued key:value pair to a Span.LogFields() record | ||||
| func String(key, val string) Field { | ||||
| 	return Field{ | ||||
| 		key:       key, | ||||
| 		fieldType: stringType, | ||||
| 		stringVal: val, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Bool adds a bool-valued key:value pair to a Span.LogFields() record | ||||
| func Bool(key string, val bool) Field { | ||||
| 	var numericVal int64 | ||||
| 	if val { | ||||
| 		numericVal = 1 | ||||
| 	} | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  boolType, | ||||
| 		numericVal: numericVal, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Int adds an int-valued key:value pair to a Span.LogFields() record | ||||
| func Int(key string, val int) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  intType, | ||||
| 		numericVal: int64(val), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Int32 adds an int32-valued key:value pair to a Span.LogFields() record | ||||
| func Int32(key string, val int32) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  int32Type, | ||||
| 		numericVal: int64(val), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Int64 adds an int64-valued key:value pair to a Span.LogFields() record | ||||
| func Int64(key string, val int64) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  int64Type, | ||||
| 		numericVal: val, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record | ||||
| func Uint32(key string, val uint32) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  uint32Type, | ||||
| 		numericVal: int64(val), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record | ||||
| func Uint64(key string, val uint64) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  uint64Type, | ||||
| 		numericVal: int64(val), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Float32 adds a float32-valued key:value pair to a Span.LogFields() record | ||||
| func Float32(key string, val float32) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  float32Type, | ||||
| 		numericVal: int64(math.Float32bits(val)), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Float64 adds a float64-valued key:value pair to a Span.LogFields() record | ||||
| func Float64(key string, val float64) Field { | ||||
| 	return Field{ | ||||
| 		key:        key, | ||||
| 		fieldType:  float64Type, | ||||
| 		numericVal: int64(math.Float64bits(val)), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Error adds an error with the key "error" to a Span.LogFields() record | ||||
| func Error(err error) Field { | ||||
| 	return Field{ | ||||
| 		key:          "error", | ||||
| 		fieldType:    errorType, | ||||
| 		interfaceVal: err, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Object adds an object-valued key:value pair to a Span.LogFields() record | ||||
| func Object(key string, obj interface{}) Field { | ||||
| 	return Field{ | ||||
| 		key:          key, | ||||
| 		fieldType:    objectType, | ||||
| 		interfaceVal: obj, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // LazyLogger allows for user-defined, late-bound logging of arbitrary data | ||||
| type LazyLogger func(fv Encoder) | ||||
|  | ||||
| // Lazy adds a LazyLogger to a Span.LogFields() record; the tracing | ||||
| // implementation will call the LazyLogger function at an indefinite time in | ||||
| // the future (after Lazy() returns). | ||||
| func Lazy(ll LazyLogger) Field { | ||||
| 	return Field{ | ||||
| 		fieldType:    lazyLoggerType, | ||||
| 		interfaceVal: ll, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Noop creates a no-op log field that should be ignored by the tracer. | ||||
| // It can be used to capture optional fields, for example those that should | ||||
| // only be logged in non-production environments: | ||||
| // | ||||
| //     func customerField(order *Order) log.Field { | ||||
| //          if os.Getenv("ENVIRONMENT") == "dev" { | ||||
| //              return log.String("customer", order.Customer.ID) | ||||
| //          } | ||||
| //          return log.Noop() | ||||
| //     } | ||||
| // | ||||
| //     span.LogFields(log.String("event", "purchase"), customerField(order)) | ||||
| // | ||||
| func Noop() Field { | ||||
| 	return Field{ | ||||
| 		fieldType: noopType, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Encoder allows access to the contents of a Field (via a call to | ||||
| // Field.Marshal). | ||||
| // | ||||
| // Tracer implementations typically provide an implementation of Encoder; | ||||
| // OpenTracing callers typically do not need to concern themselves with it. | ||||
| type Encoder interface { | ||||
| 	EmitString(key, value string) | ||||
| 	EmitBool(key string, value bool) | ||||
| 	EmitInt(key string, value int) | ||||
| 	EmitInt32(key string, value int32) | ||||
| 	EmitInt64(key string, value int64) | ||||
| 	EmitUint32(key string, value uint32) | ||||
| 	EmitUint64(key string, value uint64) | ||||
| 	EmitFloat32(key string, value float32) | ||||
| 	EmitFloat64(key string, value float64) | ||||
| 	EmitObject(key string, value interface{}) | ||||
| 	EmitLazyLogger(value LazyLogger) | ||||
| } | ||||
|  | ||||
| // Marshal passes a Field instance through to the appropriate | ||||
| // field-type-specific method of an Encoder. | ||||
| func (lf Field) Marshal(visitor Encoder) { | ||||
| 	switch lf.fieldType { | ||||
| 	case stringType: | ||||
| 		visitor.EmitString(lf.key, lf.stringVal) | ||||
| 	case boolType: | ||||
| 		visitor.EmitBool(lf.key, lf.numericVal != 0) | ||||
| 	case intType: | ||||
| 		visitor.EmitInt(lf.key, int(lf.numericVal)) | ||||
| 	case int32Type: | ||||
| 		visitor.EmitInt32(lf.key, int32(lf.numericVal)) | ||||
| 	case int64Type: | ||||
| 		visitor.EmitInt64(lf.key, int64(lf.numericVal)) | ||||
| 	case uint32Type: | ||||
| 		visitor.EmitUint32(lf.key, uint32(lf.numericVal)) | ||||
| 	case uint64Type: | ||||
| 		visitor.EmitUint64(lf.key, uint64(lf.numericVal)) | ||||
| 	case float32Type: | ||||
| 		visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) | ||||
| 	case float64Type: | ||||
| 		visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) | ||||
| 	case errorType: | ||||
| 		if err, ok := lf.interfaceVal.(error); ok { | ||||
| 			visitor.EmitString(lf.key, err.Error()) | ||||
| 		} else { | ||||
| 			visitor.EmitString(lf.key, "<nil>") | ||||
| 		} | ||||
| 	case objectType: | ||||
| 		visitor.EmitObject(lf.key, lf.interfaceVal) | ||||
| 	case lazyLoggerType: | ||||
| 		visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) | ||||
| 	case noopType: | ||||
| 		// intentionally left blank | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Key returns the field's key. | ||||
| func (lf Field) Key() string { | ||||
| 	return lf.key | ||||
| } | ||||
|  | ||||
| // Value returns the field's value as interface{}. | ||||
| func (lf Field) Value() interface{} { | ||||
| 	switch lf.fieldType { | ||||
| 	case stringType: | ||||
| 		return lf.stringVal | ||||
| 	case boolType: | ||||
| 		return lf.numericVal != 0 | ||||
| 	case intType: | ||||
| 		return int(lf.numericVal) | ||||
| 	case int32Type: | ||||
| 		return int32(lf.numericVal) | ||||
| 	case int64Type: | ||||
| 		return int64(lf.numericVal) | ||||
| 	case uint32Type: | ||||
| 		return uint32(lf.numericVal) | ||||
| 	case uint64Type: | ||||
| 		return uint64(lf.numericVal) | ||||
| 	case float32Type: | ||||
| 		return math.Float32frombits(uint32(lf.numericVal)) | ||||
| 	case float64Type: | ||||
| 		return math.Float64frombits(uint64(lf.numericVal)) | ||||
| 	case errorType, objectType, lazyLoggerType: | ||||
| 		return lf.interfaceVal | ||||
| 	case noopType: | ||||
| 		return nil | ||||
| 	default: | ||||
| 		return nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // String returns a string representation of the key and value. | ||||
| func (lf Field) String() string { | ||||
| 	return fmt.Sprint(lf.key, ":", lf.Value()) | ||||
| } | ||||

54 vendor/github.com/opentracing/opentracing-go/log/util.go generated vendored Normal file
							| @@ -0,0 +1,54 @@ | ||||
| package log | ||||
|  | ||||
| import "fmt" | ||||
|  | ||||
| // InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice | ||||
| // a la Span.LogFields(). | ||||
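| // | ||||
| // For example (a sketch; assumes an active opentracing span `sp`): | ||||
| // | ||||
| //     fields, _ := log.InterleavedKVToFields("event", "soft error", "waited.millis", 1500) | ||||
| //     sp.LogFields(fields...) | ||||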
| func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { | ||||
| 	if len(keyValues)%2 != 0 { | ||||
| 		return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) | ||||
| 	} | ||||
| 	fields := make([]Field, len(keyValues)/2) | ||||
| 	for i := 0; i*2 < len(keyValues); i++ { | ||||
| 		key, ok := keyValues[i*2].(string) | ||||
| 		if !ok { | ||||
| 			return nil, fmt.Errorf( | ||||
| 				"non-string key (pair #%d): %T", | ||||
| 				i, keyValues[i*2]) | ||||
| 		} | ||||
| 		switch typedVal := keyValues[i*2+1].(type) { | ||||
| 		case bool: | ||||
| 			fields[i] = Bool(key, typedVal) | ||||
| 		case string: | ||||
| 			fields[i] = String(key, typedVal) | ||||
| 		case int: | ||||
| 			fields[i] = Int(key, typedVal) | ||||
| 		case int8: | ||||
| 			fields[i] = Int32(key, int32(typedVal)) | ||||
| 		case int16: | ||||
| 			fields[i] = Int32(key, int32(typedVal)) | ||||
| 		case int32: | ||||
| 			fields[i] = Int32(key, typedVal) | ||||
| 		case int64: | ||||
| 			fields[i] = Int64(key, typedVal) | ||||
| 		case uint: | ||||
| 			fields[i] = Uint64(key, uint64(typedVal)) | ||||
| 		case uint64: | ||||
| 			fields[i] = Uint64(key, typedVal) | ||||
| 		case uint8: | ||||
| 			fields[i] = Uint32(key, uint32(typedVal)) | ||||
| 		case uint16: | ||||
| 			fields[i] = Uint32(key, uint32(typedVal)) | ||||
| 		case uint32: | ||||
| 			fields[i] = Uint32(key, typedVal) | ||||
| 		case float32: | ||||
| 			fields[i] = Float32(key, typedVal) | ||||
| 		case float64: | ||||
| 			fields[i] = Float64(key, typedVal) | ||||
| 		default: | ||||
| 			// When in doubt, coerce to a string | ||||
| 			fields[i] = String(key, fmt.Sprint(typedVal)) | ||||
| 		} | ||||
| 	} | ||||
| 	return fields, nil | ||||
| } | ||||

64 vendor/github.com/opentracing/opentracing-go/noop.go generated vendored Normal file
							| @@ -0,0 +1,64 @@ | ||||
| package opentracing | ||||
|  | ||||
| import "github.com/opentracing/opentracing-go/log" | ||||
|  | ||||
| // A NoopTracer is a trivial, minimum overhead implementation of Tracer | ||||
| // for which all operations are no-ops. | ||||
| // | ||||
| // The primary use of this implementation is in libraries, such as RPC | ||||
| // frameworks, that make tracing an optional feature controlled by the | ||||
| // end user. A no-op implementation allows said libraries to use it | ||||
| // as the default Tracer and to write instrumentation that does | ||||
| // not need to keep checking if the tracer instance is nil. | ||||
| // | ||||
| // For the same reason, the NoopTracer is the default "global" tracer | ||||
| // (see GlobalTracer and SetGlobalTracer functions). | ||||
| // | ||||
| // WARNING: NoopTracer does not support baggage propagation. | ||||
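| // | ||||
| // For example, a library might default to a NoopTracer and let callers swap | ||||
| // in a real implementation (an illustrative sketch; the Client type is | ||||
| // hypothetical): | ||||
| // | ||||
| //     type Client struct { | ||||
| //         tracer opentracing.Tracer | ||||
| //     } | ||||
| // | ||||
| //     func NewClient() *Client { | ||||
| //         return &Client{tracer: opentracing.NoopTracer{}} // tracing off by default | ||||
| //     } | ||||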
| type NoopTracer struct{} | ||||
|  | ||||
| type noopSpan struct{} | ||||
| type noopSpanContext struct{} | ||||
|  | ||||
| var ( | ||||
| 	defaultNoopSpanContext = noopSpanContext{} | ||||
| 	defaultNoopSpan        = noopSpan{} | ||||
| 	defaultNoopTracer      = NoopTracer{} | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	emptyString = "" | ||||
| ) | ||||
|  | ||||
| // noopSpanContext: | ||||
| func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} | ||||
|  | ||||
| // noopSpan: | ||||
| func (n noopSpan) Context() SpanContext                                  { return defaultNoopSpanContext } | ||||
| func (n noopSpan) SetBaggageItem(key, val string) Span                   { return defaultNoopSpan } | ||||
| func (n noopSpan) BaggageItem(key string) string                         { return emptyString } | ||||
| func (n noopSpan) SetTag(key string, value interface{}) Span             { return n } | ||||
| func (n noopSpan) LogFields(fields ...log.Field)                         {} | ||||
| func (n noopSpan) LogKV(keyVals ...interface{})                          {} | ||||
| func (n noopSpan) Finish()                                               {} | ||||
| func (n noopSpan) FinishWithOptions(opts FinishOptions)                  {} | ||||
| func (n noopSpan) SetOperationName(operationName string) Span            { return n } | ||||
| func (n noopSpan) Tracer() Tracer                                        { return defaultNoopTracer } | ||||
| func (n noopSpan) LogEvent(event string)                                 {} | ||||
| func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} | ||||
| func (n noopSpan) Log(data LogData)                                      {} | ||||
|  | ||||
| // StartSpan belongs to the Tracer interface. | ||||
| func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { | ||||
| 	return defaultNoopSpan | ||||
| } | ||||
|  | ||||
| // Inject belongs to the Tracer interface. | ||||
| func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Extract belongs to the Tracer interface. | ||||
| func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { | ||||
| 	return nil, ErrSpanContextNotFound | ||||
| } | ||||

176 vendor/github.com/opentracing/opentracing-go/propagation.go generated vendored Normal file
							| @@ -0,0 +1,176 @@ | ||||
| package opentracing | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"net/http" | ||||
| ) | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
| // CORE PROPAGATION INTERFACES: | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| var ( | ||||
| 	// ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or | ||||
| 	// Tracer.Extract() is not recognized by the Tracer implementation. | ||||
| 	ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") | ||||
|  | ||||
| 	// ErrSpanContextNotFound occurs when the `carrier` passed to | ||||
| 	// Tracer.Extract() is valid and uncorrupted but has insufficient | ||||
| 	// information to extract a SpanContext. | ||||
| 	ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") | ||||
|  | ||||
| 	// ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to | ||||
| 	// operate on a SpanContext which it is not prepared to handle (for | ||||
| 	// example, since it was created by a different tracer implementation). | ||||
| 	ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") | ||||
|  | ||||
| 	// ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() | ||||
| 	// implementations expect a different type of `carrier` than they are | ||||
| 	// given. | ||||
| 	ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") | ||||
|  | ||||
| 	// ErrSpanContextCorrupted occurs when the `carrier` passed to | ||||
| 	// Tracer.Extract() is of the expected type but is corrupted. | ||||
| 	ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") | ||||
| ) | ||||
|  | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
| // BUILTIN PROPAGATION FORMATS: | ||||
| /////////////////////////////////////////////////////////////////////////////// | ||||
|  | ||||
| // BuiltinFormat is used to demarcate the values within package `opentracing` | ||||
| // that are intended for use with the Tracer.Inject() and Tracer.Extract() | ||||
| // methods. | ||||
| type BuiltinFormat byte | ||||
|  | ||||
| const ( | ||||
| 	// Binary represents SpanContexts as opaque binary data. | ||||
| 	// | ||||
| 	// For Tracer.Inject(): the carrier must be an `io.Writer`. | ||||
| 	// | ||||
| 	// For Tracer.Extract(): the carrier must be an `io.Reader`. | ||||
| 	Binary BuiltinFormat = iota | ||||
|  | ||||
| 	// TextMap represents SpanContexts as key:value string pairs. | ||||
| 	// | ||||
| 	// Unlike HTTPHeaders, the TextMap format does not restrict the key or | ||||
| 	// value character sets in any way. | ||||
| 	// | ||||
| 	// For Tracer.Inject(): the carrier must be a `TextMapWriter`. | ||||
| 	// | ||||
| 	// For Tracer.Extract(): the carrier must be a `TextMapReader`. | ||||
| 	TextMap | ||||
|  | ||||
| 	// HTTPHeaders represents SpanContexts as HTTP header string pairs. | ||||
| 	// | ||||
| 	// Unlike TextMap, the HTTPHeaders format requires that the keys and values | ||||
| 	// be valid as HTTP headers as-is (i.e., character casing may be unstable | ||||
| 	// and special characters are disallowed in keys, values should be | ||||
| 	// URL-escaped, etc). | ||||
| 	// | ||||
| 	// For Tracer.Inject(): the carrier must be a `TextMapWriter`. | ||||
| 	// | ||||
| 	// For Tracer.Extract(): the carrier must be a `TextMapReader`. | ||||
| 	// | ||||
| 	// See HTTPHeadersCarrier for an implementation of both TextMapWriter | ||||
| 	// and TextMapReader that defers to an http.Header instance for storage. | ||||
| 	// For example, Inject(): | ||||
| 	// | ||||
| 	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| 	//    err := span.Tracer().Inject( | ||||
| 	//        span.Context(), opentracing.HTTPHeaders, carrier) | ||||
| 	// | ||||
| 	// Or Extract(): | ||||
| 	// | ||||
| 	//    carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| 	//    clientContext, err := tracer.Extract( | ||||
| 	//        opentracing.HTTPHeaders, carrier) | ||||
| 	// | ||||
| 	HTTPHeaders | ||||
| ) | ||||
|  | ||||
| // TextMapWriter is the Inject() carrier for the TextMap builtin format. With | ||||
| // it, the caller can encode a SpanContext for propagation as entries in a map | ||||
| // of unicode strings. | ||||
| type TextMapWriter interface { | ||||
| 	// Set a key:value pair to the carrier. Multiple calls to Set() for the | ||||
| 	// same key leads to undefined behavior. | ||||
| 	// | ||||
| 	// NOTE: The backing store for the TextMapWriter may contain data unrelated | ||||
| 	// to SpanContext. As such, Inject() and Extract() implementations that | ||||
| 	// call the TextMapWriter and TextMapReader interfaces must agree on a | ||||
| 	// prefix or other convention to distinguish their own key:value pairs. | ||||
| 	Set(key, val string) | ||||
| } | ||||
|  | ||||
| // TextMapReader is the Extract() carrier for the TextMap builtin format. With it, | ||||
| // the caller can decode a propagated SpanContext as entries in a map of | ||||
| // unicode strings. | ||||
| type TextMapReader interface { | ||||
| 	// ForeachKey returns TextMap contents via repeated calls to the `handler` | ||||
| 	// function. If any call to `handler` returns a non-nil error, ForeachKey | ||||
| 	// terminates and returns that error. | ||||
| 	// | ||||
| 	// NOTE: The backing store for the TextMapReader may contain data unrelated | ||||
| 	// to SpanContext. As such, Inject() and Extract() implementations that | ||||
| 	// call the TextMapWriter and TextMapReader interfaces must agree on a | ||||
| 	// prefix or other convention to distinguish their own key:value pairs. | ||||
| 	// | ||||
| 	// The "foreach" callback pattern reduces unnecessary copying in some cases | ||||
| 	// and also allows implementations to hold locks while the map is read. | ||||
| 	ForeachKey(handler func(key, val string) error) error | ||||
| } | ||||
|  | ||||
| // TextMapCarrier allows the use of regular map[string]string | ||||
| // as both TextMapWriter and TextMapReader. | ||||
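| // | ||||
| // Example usage (a sketch; `tracer` and `span` are assumed to be in scope): | ||||
| // | ||||
| //     carrier := opentracing.TextMapCarrier{} | ||||
| //     err := tracer.Inject(span.Context(), opentracing.TextMap, carrier) | ||||
| //     spanCtx, err := tracer.Extract(opentracing.TextMap, carrier) | ||||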
| type TextMapCarrier map[string]string | ||||
|  | ||||
| // ForeachKey conforms to the TextMapReader interface. | ||||
| func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { | ||||
| 	for k, v := range c { | ||||
| 		if err := handler(k, v); err != nil { | ||||
| 			return err | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // Set implements Set() of opentracing.TextMapWriter | ||||
| func (c TextMapCarrier) Set(key, val string) { | ||||
| 	c[key] = val | ||||
| } | ||||
|  | ||||
| // HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. | ||||
| // | ||||
| // Example usage for server side: | ||||
| // | ||||
| //     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| //     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) | ||||
| // | ||||
| // Example usage for client side: | ||||
| // | ||||
| //     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| //     err := tracer.Inject( | ||||
| //         span.Context(), | ||||
| //         opentracing.HTTPHeaders, | ||||
| //         carrier) | ||||
| // | ||||
| type HTTPHeadersCarrier http.Header | ||||
|  | ||||
| // Set conforms to the TextMapWriter interface. | ||||
| func (c HTTPHeadersCarrier) Set(key, val string) { | ||||
| 	h := http.Header(c) | ||||
| 	h.Add(key, val) | ||||
| } | ||||
|  | ||||
| // ForeachKey conforms to the TextMapReader interface. | ||||
| func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { | ||||
| 	for k, vals := range c { | ||||
| 		for _, v := range vals { | ||||
| 			if err := handler(k, v); err != nil { | ||||
| 				return err | ||||
| 			} | ||||
| 		} | ||||
| 	} | ||||
| 	return nil | ||||
| } | ||||

189 vendor/github.com/opentracing/opentracing-go/span.go generated vendored Normal file
							| @@ -0,0 +1,189 @@ | ||||
| package opentracing | ||||
|  | ||||
| import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/opentracing/opentracing-go/log" | ||||
| ) | ||||
|  | ||||
| // SpanContext represents Span state that must propagate to descendant Spans and across process | ||||
| // boundaries (e.g., a <trace_id, span_id, sampled> tuple). | ||||
| type SpanContext interface { | ||||
| 	// ForeachBaggageItem grants access to all baggage items stored in the | ||||
| 	// SpanContext. | ||||
| 	// The handler function will be called for each baggage key/value pair. | ||||
| 	// The ordering of items is not guaranteed. | ||||
| 	// | ||||
| 	// The bool return value indicates if the handler wants to continue iterating | ||||
| 	// through the rest of the baggage items; for example if the handler is trying to | ||||
| 	// find some baggage item by pattern matching the name, it can return false | ||||
| 	// as soon as the item is found to stop further iterations. | ||||
| 	ForeachBaggageItem(handler func(k, v string) bool) | ||||
| } | ||||
|  | ||||
| // Span represents an active, un-finished span in the OpenTracing system. | ||||
| // | ||||
| // Spans are created by the Tracer interface. | ||||
| type Span interface { | ||||
| 	// Sets the end timestamp and finalizes Span state. | ||||
| 	// | ||||
| 	// With the exception of calls to Context() (which are always allowed), | ||||
| 	// Finish() must be the last call made to any span instance, and to do | ||||
| 	// otherwise leads to undefined behavior. | ||||
| 	Finish() | ||||
| 	// FinishWithOptions is like Finish() but with explicit control over | ||||
| 	// timestamps and log data. | ||||
| 	FinishWithOptions(opts FinishOptions) | ||||
|  | ||||
| 	// Context() yields the SpanContext for this Span. Note that the return | ||||
| 	// value of Context() is still valid after a call to Span.Finish(), as is | ||||
| 	// a call to Span.Context() after a call to Span.Finish(). | ||||
| 	Context() SpanContext | ||||
|  | ||||
| 	// Sets or changes the operation name. | ||||
| 	// | ||||
| 	// Returns a reference to this Span for chaining. | ||||
| 	SetOperationName(operationName string) Span | ||||
|  | ||||
| 	// Adds a tag to the span. | ||||
| 	// | ||||
| 	// If there is a pre-existing tag set for `key`, it is overwritten. | ||||
| 	// | ||||
| 	// Tag values can be numeric types, strings, or bools. The behavior of | ||||
| 	// other tag value types is undefined at the OpenTracing level. If a | ||||
| 	// tracing system does not know how to handle a particular value type, it | ||||
| 	// may ignore the tag, but shall not panic. | ||||
| 	// | ||||
| 	// Returns a reference to this Span for chaining. | ||||
| 	SetTag(key string, value interface{}) Span | ||||
|  | ||||
| 	// LogFields is an efficient and type-checked way to record key:value | ||||
| 	// logging data about a Span, though the programming interface is a little | ||||
| 	// more verbose than LogKV(). Here's an example: | ||||
| 	// | ||||
| 	//    span.LogFields( | ||||
| 	//        log.String("event", "soft error"), | ||||
| 	//        log.String("type", "cache timeout"), | ||||
| 	//        log.Int("waited.millis", 1500)) | ||||
| 	// | ||||
| 	// Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. | ||||
| 	LogFields(fields ...log.Field) | ||||
|  | ||||
| 	// LogKV is a concise, readable way to record key:value logging data about | ||||
| 	// a Span, though unfortunately this also makes it less efficient and less | ||||
| 	// type-safe than LogFields(). Here's an example: | ||||
| 	// | ||||
| 	//    span.LogKV( | ||||
| 	//        "event", "soft error", | ||||
| 	//        "type", "cache timeout", | ||||
| 	//        "waited.millis", 1500) | ||||
| 	// | ||||
| 	// For LogKV (as opposed to LogFields()), the parameters must appear as | ||||
| 	// key-value pairs, like | ||||
| 	// | ||||
| 	//    span.LogKV(key1, val1, key2, val2, key3, val3, ...) | ||||
| 	// | ||||
| 	// The keys must all be strings. The values may be strings, numeric types, | ||||
| 	// bools, Go error instances, or arbitrary structs. | ||||
| 	// | ||||
| 	// (Note to implementors: consider the log.InterleavedKVToFields() helper) | ||||
| 	LogKV(alternatingKeyValues ...interface{}) | ||||
|  | ||||
| 	// SetBaggageItem sets a key:value pair on this Span and its SpanContext | ||||
| 	// that also propagates to descendants of this Span. | ||||
| 	// | ||||
| 	// SetBaggageItem() enables powerful functionality given a full-stack | ||||
| 	// opentracing integration (e.g., arbitrary application data from a mobile | ||||
| 	// app can make it, transparently, all the way into the depths of a storage | ||||
| 	// system), and with it some powerful costs: use this feature with care. | ||||
| 	// | ||||
| 	// IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to | ||||
| 	// *future* causal descendants of the associated Span. | ||||
| 	// | ||||
| 	// IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and | ||||
| 	// value is copied into every local *and remote* child of the associated | ||||
| 	// Span, and that can add up to a lot of network and cpu overhead. | ||||
| 	// | ||||
| 	// Returns a reference to this Span for chaining. | ||||
| 	SetBaggageItem(restrictedKey, value string) Span | ||||
|  | ||||
| 	// Gets the value for a baggage item given its key. Returns the empty string | ||||
| 	// if the value isn't found in this Span. | ||||
| 	BaggageItem(restrictedKey string) string | ||||
|  | ||||
| 	// Provides access to the Tracer that created this Span. | ||||
| 	Tracer() Tracer | ||||
|  | ||||
| 	// Deprecated: use LogFields or LogKV | ||||
| 	LogEvent(event string) | ||||
| 	// Deprecated: use LogFields or LogKV | ||||
| 	LogEventWithPayload(event string, payload interface{}) | ||||
| 	// Deprecated: use LogFields or LogKV | ||||
| 	Log(data LogData) | ||||
| } | ||||
|  | ||||
| // LogRecord is data associated with a single Span log. Every LogRecord | ||||
| // instance must specify at least one Field. | ||||
| type LogRecord struct { | ||||
| 	Timestamp time.Time | ||||
| 	Fields    []log.Field | ||||
| } | ||||
|  | ||||
| // FinishOptions allows Span.FinishWithOptions callers to override the finish | ||||
| // timestamp and provide log data via a bulk interface. | ||||
| type FinishOptions struct { | ||||
| 	// FinishTime overrides the Span's finish time, or implicitly becomes | ||||
| 	// time.Now() if FinishTime.IsZero(). | ||||
| 	// | ||||
| 	// FinishTime must resolve to a timestamp that's >= the Span's StartTime | ||||
| 	// (per StartSpanOptions). | ||||
| 	FinishTime time.Time | ||||
|  | ||||
| 	// LogRecords allows the caller to specify the contents of many LogFields() | ||||
| 	// calls with a single slice. May be nil. | ||||
| 	// | ||||
| 	// None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must | ||||
| 	// be set explicitly). Also, they must be >= the Span's start timestamp and | ||||
| 	// <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the | ||||
| 	// behavior of FinishWithOptions() is undefined. | ||||
| 	// | ||||
| 	// If specified, the caller hands off ownership of LogRecords at | ||||
| 	// FinishWithOptions() invocation time. | ||||
| 	// | ||||
| 	// If specified, the (deprecated) BulkLogData must be nil or empty. | ||||
| 	LogRecords []LogRecord | ||||
|  | ||||
| 	// BulkLogData is DEPRECATED. | ||||
| 	BulkLogData []LogData | ||||
| } | ||||
|  | ||||
| // LogData is DEPRECATED | ||||
| type LogData struct { | ||||
| 	Timestamp time.Time | ||||
| 	Event     string | ||||
| 	Payload   interface{} | ||||
| } | ||||
|  | ||||
| // ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord | ||||
| func (ld *LogData) ToLogRecord() LogRecord { | ||||
| 	var literalTimestamp time.Time | ||||
| 	if ld.Timestamp.IsZero() { | ||||
| 		literalTimestamp = time.Now() | ||||
| 	} else { | ||||
| 		literalTimestamp = ld.Timestamp | ||||
| 	} | ||||
| 	rval := LogRecord{ | ||||
| 		Timestamp: literalTimestamp, | ||||
| 	} | ||||
| 	if ld.Payload == nil { | ||||
| 		rval.Fields = []log.Field{ | ||||
| 			log.String("event", ld.Event), | ||||
| 		} | ||||
| 	} else { | ||||
| 		rval.Fields = []log.Field{ | ||||
| 			log.String("event", ld.Event), | ||||
| 			log.Object("payload", ld.Payload), | ||||
| 		} | ||||
| 	} | ||||
| 	return rval | ||||
| } | ||||

305 vendor/github.com/opentracing/opentracing-go/tracer.go generated vendored Normal file
							| @@ -0,0 +1,305 @@ | ||||
| package opentracing | ||||
|  | ||||
| import "time" | ||||
|  | ||||
| // Tracer is a simple, thin interface for Span creation and SpanContext | ||||
| // propagation. | ||||
| type Tracer interface { | ||||
|  | ||||
| 	// Create, start, and return a new Span with the given `operationName` and | ||||
| 	// incorporate the given StartSpanOption `opts`. (Note that `opts` borrows | ||||
| 	// from the "functional options" pattern, per | ||||
| 	// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) | ||||
| 	// | ||||
| 	// A Span with no SpanReference options (e.g., opentracing.ChildOf() or | ||||
| 	// opentracing.FollowsFrom()) becomes the root of its own trace. | ||||
| 	// | ||||
| 	// Examples: | ||||
| 	// | ||||
| 	//     var tracer opentracing.Tracer = ... | ||||
| 	// | ||||
| 	//     // The root-span case: | ||||
| 	//     sp := tracer.StartSpan("GetFeed") | ||||
| 	// | ||||
| 	//     // The vanilla child span case: | ||||
| 	//     sp := tracer.StartSpan( | ||||
| 	//         "GetFeed", | ||||
| 	//         opentracing.ChildOf(parentSpan.Context())) | ||||
| 	// | ||||
| 	//     // All the bells and whistles: | ||||
| 	//     sp := tracer.StartSpan( | ||||
| 	//         "GetFeed", | ||||
| 	//         opentracing.ChildOf(parentSpan.Context()), | ||||
| 	//         opentracing.Tag{"user_agent", loggedReq.UserAgent}, | ||||
| 	//         opentracing.StartTime(loggedReq.Timestamp), | ||||
| 	//     ) | ||||
| 	// | ||||
| 	StartSpan(operationName string, opts ...StartSpanOption) Span | ||||
|  | ||||
| 	// Inject() takes the `sm` SpanContext instance and injects it for | ||||
| 	// propagation within `carrier`. The actual type of `carrier` depends on | ||||
| 	// the value of `format`. | ||||
| 	// | ||||
| 	// OpenTracing defines a common set of `format` values (see BuiltinFormat), | ||||
| 	// and each has an expected carrier type. | ||||
| 	// | ||||
| 	// Other packages may declare their own `format` values, much like the keys | ||||
| 	// used by `context.Context` (see | ||||
| 	// https://godoc.org/golang.org/x/net/context#WithValue). | ||||
| 	// | ||||
| 	// Example usage (sans error handling): | ||||
| 	// | ||||
| 	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| 	//     err := tracer.Inject( | ||||
| 	//         span.Context(), | ||||
| 	//         opentracing.HTTPHeaders, | ||||
| 	//         carrier) | ||||
| 	// | ||||
| 	// NOTE: All opentracing.Tracer implementations MUST support all | ||||
| 	// BuiltinFormats. | ||||
| 	// | ||||
| 	// Implementations may return opentracing.ErrUnsupportedFormat if `format` | ||||
| 	// is not supported by (or not known by) the implementation. | ||||
| 	// | ||||
| 	// Implementations may return opentracing.ErrInvalidCarrier or any other | ||||
| 	// implementation-specific error if the format is supported but injection | ||||
| 	// fails anyway. | ||||
| 	// | ||||
| 	// See Tracer.Extract(). | ||||
| 	Inject(sm SpanContext, format interface{}, carrier interface{}) error | ||||
|  | ||||
| 	// Extract() returns a SpanContext instance given `format` and `carrier`. | ||||
| 	// | ||||
| 	// OpenTracing defines a common set of `format` values (see BuiltinFormat), | ||||
| 	// and each has an expected carrier type. | ||||
| 	// | ||||
| 	// Other packages may declare their own `format` values, much like the keys | ||||
| 	// used by `context.Context` (see | ||||
| 	// https://godoc.org/golang.org/x/net/context#WithValue). | ||||
| 	// | ||||
| 	// Example usage (with StartSpan): | ||||
| 	// | ||||
| 	// | ||||
| 	//     carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) | ||||
| 	//     clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) | ||||
| 	// | ||||
| 	//     // ... assuming the ultimate goal here is to resume the trace with a | ||||
| 	//     // server-side Span: | ||||
| 	//     var serverSpan opentracing.Span | ||||
| 	//     if err == nil { | ||||
| 	//         serverSpan = tracer.StartSpan( | ||||
| 	//             rpcMethodName, ext.RPCServerOption(clientContext)) | ||||
| 	//     } else { | ||||
| 	//         serverSpan = tracer.StartSpan(rpcMethodName) | ||||
| 	//     } | ||||
| 	// | ||||
| 	// | ||||
| 	// NOTE: All opentracing.Tracer implementations MUST support all | ||||
| 	// BuiltinFormats. | ||||
| 	// | ||||
| 	// Return values: | ||||
| 	//  - A successful Extract returns a SpanContext instance and a nil error | ||||
| 	//  - If there was simply no SpanContext to extract in `carrier`, Extract() | ||||
| 	//    returns (nil, opentracing.ErrSpanContextNotFound) | ||||
| 	//  - If `format` is unsupported or unrecognized, Extract() returns (nil, | ||||
| 	//    opentracing.ErrUnsupportedFormat) | ||||
| 	//  - If there are more fundamental problems with the `carrier` object, | ||||
| 	//    Extract() may return opentracing.ErrInvalidCarrier, | ||||
| 	//    opentracing.ErrSpanContextCorrupted, or implementation-specific | ||||
| 	//    errors. | ||||
| 	// | ||||
| 	// See Tracer.Inject(). | ||||
| 	Extract(format interface{}, carrier interface{}) (SpanContext, error) | ||||
| } | ||||
|  | ||||
| // StartSpanOptions allows Tracer.StartSpan() callers and implementors a | ||||
| // mechanism to override the start timestamp, specify Span References, and make | ||||
| // a single Tag or multiple Tags available at Span start time. | ||||
| // | ||||
| // StartSpan() callers should look at the StartSpanOption interface and | ||||
| // implementations available in this package. | ||||
| // | ||||
| // Tracer implementations can convert a slice of `StartSpanOption` instances | ||||
| // into a `StartSpanOptions` struct like so: | ||||
| // | ||||
| //     func StartSpan(opName string, opts ...opentracing.StartSpanOption) { | ||||
| //         sso := opentracing.StartSpanOptions{} | ||||
| //         for _, o := range opts { | ||||
| //             o.Apply(&sso) | ||||
| //         } | ||||
| //         ... | ||||
| //     } | ||||
| // | ||||
| type StartSpanOptions struct { | ||||
| 	// Zero or more causal references to other Spans (via their SpanContext). | ||||
| 	// If empty, start a "root" Span (i.e., start a new trace). | ||||
| 	References []SpanReference | ||||
|  | ||||
| 	// StartTime overrides the Span's start time, or implicitly becomes | ||||
| 	// time.Now() if StartTime.IsZero(). | ||||
| 	StartTime time.Time | ||||
|  | ||||
| 	// Tags may have zero or more entries; the restrictions on map values are | ||||
| 	// identical to those for Span.SetTag(). May be nil. | ||||
| 	// | ||||
| 	// If specified, the caller hands off ownership of Tags at | ||||
| 	// StartSpan() invocation time. | ||||
| 	Tags map[string]interface{} | ||||
| } | ||||
|  | ||||
| // StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. | ||||
| // | ||||
| // StartSpanOption borrows from the "functional options" pattern, per | ||||
| // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis | ||||
| type StartSpanOption interface { | ||||
| 	Apply(*StartSpanOptions) | ||||
| } | ||||
|  | ||||
| // SpanReferenceType is an enum type describing different categories of | ||||
| // relationships between two Spans. If Span-2 refers to Span-1, the | ||||
| // SpanReferenceType describes Span-1 from Span-2's perspective. For example, | ||||
| // ChildOfRef means that Span-1 created Span-2. | ||||
| // | ||||
| // NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for | ||||
| // completion; e.g., Span-2 may be part of a background job enqueued by Span-1, | ||||
| // or Span-2 may be sitting in a distributed queue behind Span-1. | ||||
| type SpanReferenceType int | ||||
|  | ||||
| const ( | ||||
| 	// ChildOfRef refers to a parent Span that caused *and* somehow depends | ||||
| 	// upon the new child Span. Often (but not always), the parent Span cannot | ||||
| 	// finish until the child Span does. | ||||
| 	// | ||||
| 	// A timing diagram for a ChildOfRef that's blocked on the new Span: | ||||
| 	// | ||||
| 	//     [-Parent Span---------] | ||||
| 	//          [-Child Span----] | ||||
| 	// | ||||
| 	// See http://opentracing.io/spec/ | ||||
| 	// | ||||
| 	// See opentracing.ChildOf() | ||||
| 	ChildOfRef SpanReferenceType = iota | ||||
|  | ||||
| 	// FollowsFromRef refers to a parent Span that does not depend in any way | ||||
| 	// on the result of the new child Span. For instance, one might use | ||||
| 	// FollowsFromRefs to describe pipeline stages separated by queues, | ||||
| 	// or a fire-and-forget cache insert at the tail end of a web request. | ||||
| 	// | ||||
| 	// A FollowsFromRef Span is part of the same logical trace as the new Span: | ||||
| 	// i.e., the new Span is somehow caused by the work of its FollowsFromRef. | ||||
| 	// | ||||
| 	// All of the following could be valid timing diagrams for children that | ||||
| 	// "FollowFrom" a parent. | ||||
| 	// | ||||
| 	//     [-Parent Span-]  [-Child Span-] | ||||
| 	// | ||||
| 	// | ||||
| 	//     [-Parent Span--] | ||||
| 	//      [-Child Span-] | ||||
| 	// | ||||
| 	// | ||||
| 	//     [-Parent Span-] | ||||
| 	//                 [-Child Span-] | ||||
| 	// | ||||
| 	// See http://opentracing.io/spec/ | ||||
| 	// | ||||
| 	// See opentracing.FollowsFrom() | ||||
| 	FollowsFromRef | ||||
| ) | ||||
|  | ||||
| // SpanReference is a StartSpanOption that pairs a SpanReferenceType and a | ||||
| // referenced SpanContext. See the SpanReferenceType documentation for | ||||
| // supported relationships.  If SpanReference is created with | ||||
| // ReferencedContext==nil, it has no effect. Thus it allows for a more concise | ||||
| // syntax for starting spans: | ||||
| // | ||||
| //     sc, _ := tracer.Extract(someFormat, someCarrier) | ||||
| //     span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) | ||||
| // | ||||
| // The `ChildOf(sc)` option above will not panic if sc == nil; it will just | ||||
| // not add the parent span reference to the options. | ||||
| type SpanReference struct { | ||||
| 	Type              SpanReferenceType | ||||
| 	ReferencedContext SpanContext | ||||
| } | ||||
|  | ||||
| // Apply satisfies the StartSpanOption interface. | ||||
| func (r SpanReference) Apply(o *StartSpanOptions) { | ||||
| 	if r.ReferencedContext != nil { | ||||
| 		o.References = append(o.References, r) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ChildOf returns a StartSpanOption pointing to a dependent parent span. | ||||
| // If sc == nil, the option has no effect. | ||||
| // | ||||
| // See ChildOfRef, SpanReference | ||||
| func ChildOf(sc SpanContext) SpanReference { | ||||
| 	return SpanReference{ | ||||
| 		Type:              ChildOfRef, | ||||
| 		ReferencedContext: sc, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // FollowsFrom returns a StartSpanOption pointing to a parent Span that caused | ||||
| // the child Span but does not directly depend on its result in any way. | ||||
| // If sc == nil, the option has no effect. | ||||
| // | ||||
| // See FollowsFromRef, SpanReference | ||||
| func FollowsFrom(sc SpanContext) SpanReference { | ||||
| 	return SpanReference{ | ||||
| 		Type:              FollowsFromRef, | ||||
| 		ReferencedContext: sc, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // StartTime is a StartSpanOption that sets an explicit start timestamp for the | ||||
| // new Span. | ||||
| type StartTime time.Time | ||||
|  | ||||
| // Apply satisfies the StartSpanOption interface. | ||||
| func (t StartTime) Apply(o *StartSpanOptions) { | ||||
| 	o.StartTime = time.Time(t) | ||||
| } | ||||
|  | ||||
| // Tags are a generic map from an arbitrary string key to an opaque value type. | ||||
| // The underlying tracing system is responsible for interpreting and | ||||
| // serializing the values. | ||||
| type Tags map[string]interface{} | ||||
|  | ||||
| // Apply satisfies the StartSpanOption interface. | ||||
| func (t Tags) Apply(o *StartSpanOptions) { | ||||
| 	if o.Tags == nil { | ||||
| 		o.Tags = make(map[string]interface{}) | ||||
| 	} | ||||
| 	for k, v := range t { | ||||
| 		o.Tags[k] = v | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Tag may be passed as a StartSpanOption to add a tag to new spans, | ||||
| // or its Set method may be used to apply the tag to an existing Span, | ||||
| // for example: | ||||
| // | ||||
| // tracer.StartSpan("opName", Tag{"Key", value}) | ||||
| // | ||||
| //   or | ||||
| // | ||||
| // Tag{"key", value}.Set(span) | ||||
| type Tag struct { | ||||
| 	Key   string | ||||
| 	Value interface{} | ||||
| } | ||||
|  | ||||
| // Apply satisfies the StartSpanOption interface. | ||||
| func (t Tag) Apply(o *StartSpanOptions) { | ||||
| 	if o.Tags == nil { | ||||
| 		o.Tags = make(map[string]interface{}) | ||||
| 	} | ||||
| 	o.Tags[t.Key] = t.Value | ||||
| } | ||||
|  | ||||
| // Set applies the tag to an existing Span. | ||||
| func (t Tag) Set(s Span) { | ||||
| 	s.SetTag(t.Key, t.Value) | ||||
| } | ||||
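
The vendored `tracer.go` above is the whole propagation surface the swarm tracing code relies on: `Inject`/`Extract` move a `SpanContext` across process boundaries, while `ChildOf`, `FollowsFrom`, `StartTime`, `Tags` and `Tag` are the options a caller hands to `StartSpan`. As a reference only, here is a minimal sketch of how those pieces compose over HTTP, assuming a concrete tracer (e.g. Jaeger) has already been registered via `opentracing.SetGlobalTracer`; the URL, operation names and tag keys are illustrative.

```go
// A sketch only: combining Inject/Extract with the StartSpanOption helpers
// defined above. Assumes a concrete tracer is already registered globally.
package main

import (
	"net/http"
	"time"

	opentracing "github.com/opentracing/opentracing-go"
)

// clientCall starts a span and injects its SpanContext into the outgoing
// request headers so the server can continue the trace.
func clientCall(url string) error {
	tracer := opentracing.GlobalTracer()
	span := tracer.StartSpan(
		"client.call",
		opentracing.StartTime(time.Now()),        // explicit start time (optional)
		opentracing.Tags{"component": "example"}, // tags known at start time
	)
	defer span.Finish()

	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	// Marshal the span context into the HTTP headers.
	carrier := opentracing.HTTPHeadersCarrier(req.Header)
	if err := tracer.Inject(span.Context(), opentracing.HTTPHeaders, carrier); err != nil {
		return err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}

// serverHandler extracts the caller's SpanContext (if any) and starts a
// child span; a failed Extract simply yields a new root span.
func serverHandler(w http.ResponseWriter, r *http.Request) {
	tracer := opentracing.GlobalTracer()
	carrier := opentracing.HTTPHeadersCarrier(r.Header)
	parentCtx, _ := tracer.Extract(opentracing.HTTPHeaders, carrier)

	span := tracer.StartSpan("server.handle", opentracing.ChildOf(parentCtx))
	defer span.Finish()

	opentracing.Tag{Key: "http.url", Value: r.URL.Path}.Set(span) // tag an existing span
	w.WriteHeader(http.StatusOK)
}

func main() {
	http.HandleFunc("/", serverHandler)
	go func() { _ = http.ListenAndServe(":8080", nil) }()
	time.Sleep(100 * time.Millisecond) // crude wait for the listener in this sketch
	_ = clientCall("http://localhost:8080/")
}
```

Because `ChildOf(nil)` is a no-op (see `SpanReference.Apply` above), the server path behaves the same whether or not the caller propagated a context.
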
							
								
								
									
186 vendor/github.com/uber/jaeger-client-go/CHANGELOG.md generated vendored Normal file
| @@ -0,0 +1,186 @@ | ||||
| Changes by Version | ||||
| ================== | ||||
|  | ||||
| 2.15.0 (unreleased) | ||||
| ------------------- | ||||
|  | ||||
| - nothing yet | ||||
|  | ||||
|  | ||||
| 2.14.0 (2018-04-30) | ||||
| ------------------- | ||||
|  | ||||
| - Support throttling for debug traces (#274) <Isaac Hier> | ||||
| - Remove dependency on Apache Thrift (#303) <Yuri Shkuro> | ||||
| - Remove dependency on tchannel  (#295) (#294) <Yuri Shkuro> | ||||
| - Test with Go 1.9 (#298) <Yuri Shkuro> | ||||
|  | ||||
|  | ||||
| 2.13.0 (2018-04-15) | ||||
| ------------------- | ||||
|  | ||||
| - Use value receiver for config.NewTracer() (#283) <Yuri Shkuro> | ||||
| - Lock span during jaeger thrift conversion (#273) <Won Jun Jang> | ||||
| - Fix the RemotelyControlledSampler so that it terminates go-routine on Close() (#260) <Scott Kidder> <Yuri Shkuro> | ||||
| - Added support for client configuration via env vars (#275) <Juraci Paixão Kröhling> | ||||
| - Allow overriding sampler in the Config (#270) <Mike Kabischev> | ||||
|  | ||||
|  | ||||
| 2.12.0 (2018-03-14) | ||||
| ------------------- | ||||
|  | ||||
| - Use lock when retrieving span.Context() (#268) | ||||
| - Add Configuration support for custom Injector and Extractor (#263) <Martin Liu> | ||||
|  | ||||
|  | ||||
| 2.11.2 (2018-01-12) | ||||
| ------------------- | ||||
|  | ||||
| - Add Gopkg.toml to allow using the lib with `dep` | ||||
|  | ||||
|  | ||||
| 2.11.1 (2018-01-03) | ||||
| ------------------- | ||||
|  | ||||
| - Do not enqueue spans after Reporter is closed (#235, #245) | ||||
| - Change default flush interval to 1sec (#243) | ||||
|  | ||||
|  | ||||
| 2.11.0 (2017-11-27) | ||||
| ------------------- | ||||
|  | ||||
| - Normalize metric names and tags to be compatible with Prometheus (#222) | ||||
|  | ||||
|  | ||||
| 2.10.0 (2017-11-14) | ||||
| ------------------- | ||||
|  | ||||
| - Support custom tracing headers (#176) | ||||
| - Add BaggageRestrictionManager (#178) and RemoteBaggageRestrictionManager (#182) | ||||
| - Do not coerce baggage keys to lower case (#196) | ||||
| - Log span name when span cannot be reported (#198) | ||||
| - Add option to enable gen128Bit for tracer (#193) and allow custom generator for high bits of trace ID (#219) | ||||
|  | ||||
|  | ||||
| 2.9.0 (2017-07-29) | ||||
| ------------------ | ||||
|  | ||||
| - Pin thrift <= 0.10 (#179) | ||||
| - Introduce a parallel interface ContribObserver (#159) | ||||
|  | ||||
|  | ||||
| 2.8.0 (2017-07-05) | ||||
| ------------------ | ||||
|  | ||||
| - Drop `jaeger.` prefix from `jaeger.hostname` process-level tag | ||||
| - Add options to set tracer tags | ||||
|  | ||||
|  | ||||
| 2.7.0 (2017-06-21) | ||||
| ------------------ | ||||
|  | ||||
| - Fix rate limiter balance [#135](https://github.com/uber/jaeger-client-go/pull/135) [#140](https://github.com/uber/jaeger-client-go/pull/140) | ||||
| - Default client to send Jaeger.thrift [#147](https://github.com/uber/jaeger-client-go/pull/147) | ||||
| - Save baggage in span [#153](https://github.com/uber/jaeger-client-go/pull/153) | ||||
| - Move reporter.queueLength to the top of the struct to guarantee 64bit alignment [#158](https://github.com/uber/jaeger-client-go/pull/158) | ||||
| - Support HTTP transport with jaeger.thrift [#161](https://github.com/uber/jaeger-client-go/pull/161) | ||||
|  | ||||
|  | ||||
| 2.6.0 (2017-03-28) | ||||
| ------------------ | ||||
|  | ||||
| - Add config option to initialize RPC Metrics feature | ||||
|  | ||||
|  | ||||
| 2.5.0 (2017-03-23) | ||||
| ------------------ | ||||
|  | ||||
| - Split request latency metric by success/failure [#123](https://github.com/uber/jaeger-client-go/pull/123) | ||||
| - Add mutex to adaptive sampler and fix race condition [#124](https://github.com/uber/jaeger-client-go/pull/124) | ||||
| - Fix rate limiter panic [#125](https://github.com/uber/jaeger-client-go/pull/125) | ||||
|  | ||||
|  | ||||
| 2.4.0 (2017-03-21) | ||||
| ------------------ | ||||
|  | ||||
| - Remove `_ms` suffix from request latency metric name [#121](https://github.com/uber/jaeger-client-go/pull/121) | ||||
| - Rename all metrics to "request" and "http_request" and use tags for other dimensions [#121](https://github.com/uber/jaeger-client-go/pull/121) | ||||
|  | ||||
|  | ||||
| 2.3.0 (2017-03-20) | ||||
| ------------------ | ||||
|  | ||||
| - Make Span type public to allow access to non-std methods for testing [#117](https://github.com/uber/jaeger-client-go/pull/117) | ||||
| - Add a structured way to extract traces for logging with zap [#118](https://github.com/uber/jaeger-client-go/pull/118) | ||||
|  | ||||
|  | ||||
| 2.2.1 (2017-03-14) | ||||
| ------------------ | ||||
|  | ||||
| - Fix panic caused by updating the remote sampler from adaptive sampler to any other sampler type (https://github.com/uber/jaeger-client-go/pull/111) | ||||
|  | ||||
|  | ||||
| 2.2.0 (2017-03-10) | ||||
| ------------------ | ||||
|  | ||||
| - Introduce Observer and SpanObserver (https://github.com/uber/jaeger-client-go/pull/94) | ||||
| - Add RPC metrics emitter as Observer/SpanObserver (https://github.com/uber/jaeger-client-go/pull/103) | ||||
|  | ||||
|  | ||||
| 2.1.2 (2017-02-27) | ||||
| ------------------- | ||||
|  | ||||
| - Fix leaky bucket bug (https://github.com/uber/jaeger-client-go/pull/99) | ||||
| - Fix zap logger Infof (https://github.com/uber/jaeger-client-go/pull/100) | ||||
| - Add tracer initialization godoc examples | ||||
|  | ||||
|  | ||||
| 2.1.1 (2017-02-21) | ||||
| ------------------- | ||||
|  | ||||
| - Fix inefficient usage of zap.Logger | ||||
|  | ||||
|  | ||||
| 2.1.0 (2017-02-17) | ||||
| ------------------- | ||||
|  | ||||
| - Add adapter for zap.Logger (https://github.com/uber-go/zap) | ||||
| - Move logging API to ./log/ package | ||||
|  | ||||
|  | ||||
| 2.0.0 (2017-02-08) | ||||
| ------------------- | ||||
|  | ||||
| - Support Adaptive Sampling | ||||
| - Support 128bit Trace IDs | ||||
| - Change trace/span IDs from uint64 to strong types TraceID and SpanID | ||||
| - Add Zipkin HTTP B3 Propagation format support #72 | ||||
| - Rip out existing metrics and use github.com/uber/jaeger-lib/metrics | ||||
| - Change API for tracer, reporter, sampler initialization | ||||
|  | ||||
|  | ||||
| 1.6.0 (2016-10-14) | ||||
| ------------------- | ||||
|  | ||||
| - Add Zipkin HTTP transport | ||||
| - Support external baggage via jaeger-baggage header | ||||
| - Unpin Thrift version, keep to master | ||||
|  | ||||
|  | ||||
| 1.5.1 (2016-09-27) | ||||
| ------------------- | ||||
|  | ||||
| - Relax dependency on opentracing to ^1 | ||||
|  | ||||
|  | ||||
| 1.5.0 (2016-09-27) | ||||
| ------------------- | ||||
|  | ||||
| - Upgrade to opentracing-go 1.0 | ||||
| - Support KV logging for Spans | ||||
|  | ||||
|  | ||||
| 1.4.0 (2016-09-14) | ||||
| ------------------- | ||||
|  | ||||
| - Support debug traces via HTTP header "jaeger-debug-id" | ||||
							
								
								
									
170 vendor/github.com/uber/jaeger-client-go/CONTRIBUTING.md generated vendored Normal file
| @@ -0,0 +1,170 @@ | ||||
| # How to Contribute to Jaeger | ||||
|  | ||||
| We'd love your help! | ||||
|  | ||||
| Jaeger is [Apache 2.0 licensed](LICENSE) and accepts contributions via GitHub | ||||
| pull requests. This document outlines some of the conventions on development | ||||
| workflow, commit message formatting, contact points and other resources to make | ||||
| it easier to get your contribution accepted. | ||||
|  | ||||
| We gratefully welcome improvements to documentation as well as to code. | ||||
|  | ||||
| # Certificate of Origin | ||||
|  | ||||
| By contributing to this project you agree to the [Developer Certificate of | ||||
| Origin](https://developercertificate.org/) (DCO). This document was created | ||||
| by the Linux Kernel community and is a simple statement that you, as a | ||||
| contributor, have the legal right to make the contribution. See the [DCO](DCO) | ||||
| file for details. | ||||
|  | ||||
| ## Getting Started | ||||
|  | ||||
| This library uses [glide](https://github.com/Masterminds/glide) to manage dependencies. | ||||
|  | ||||
| To get started, make sure you clone the Git repository into the correct location | ||||
| `github.com/uber/jaeger-client-go` relative to `$GOPATH`: | ||||
|  | ||||
| ``` | ||||
| mkdir -p $GOPATH/src/github.com/uber | ||||
| cd $GOPATH/src/github.com/uber | ||||
| git clone git@github.com:jaegertracing/jaeger-client-go.git jaeger-client-go | ||||
| cd jaeger-client-go | ||||
| ``` | ||||
|  | ||||
| Then install dependencies and run the tests: | ||||
|  | ||||
| ``` | ||||
| git submodule update --init --recursive | ||||
| glide install | ||||
| make test | ||||
| ``` | ||||
|  | ||||
| ## Imports grouping | ||||
|  | ||||
| This project uses the following pattern for grouping imports in Go files: | ||||
|   * imports from standard library | ||||
|   * imports from other projects | ||||
|   * imports from `jaeger-client-go` project | ||||
|    | ||||
| For example: | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
| 	"fmt" | ||||
|   | ||||
| 	"github.com/uber/jaeger-lib/metrics" | ||||
| 	"go.uber.org/zap" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go/config" | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| ## Making A Change | ||||
|  | ||||
| *Before making any significant changes, please [open an | ||||
| issue](https://github.com/jaegertracing/jaeger-client-go/issues).* Discussing your proposed | ||||
| changes ahead of time will make the contribution process smooth for everyone. | ||||
|  | ||||
| Once we've discussed your changes and you've got your code ready, make sure | ||||
| that tests are passing (`make test` or `make cover`) and open your PR. Your | ||||
| pull request is most likely to be accepted if it: | ||||
|  | ||||
| * Includes tests for new functionality. | ||||
| * Follows the guidelines in [Effective | ||||
|   Go](https://golang.org/doc/effective_go.html) and the [Go team's common code | ||||
|   review comments](https://github.com/golang/go/wiki/CodeReviewComments). | ||||
| * Has a [good commit message](https://chris.beams.io/posts/git-commit/): | ||||
|    * Separate subject from body with a blank line | ||||
|    * Limit the subject line to 50 characters | ||||
|    * Capitalize the subject line | ||||
|    * Do not end the subject line with a period | ||||
|    * Use the imperative mood in the subject line | ||||
|    * Wrap the body at 72 characters | ||||
|    * Use the body to explain _what_ and _why_ instead of _how_ | ||||
| * Each commit must be signed by the author ([see below](#sign-your-work)). | ||||
|  | ||||
| ## License | ||||
|  | ||||
| By contributing your code, you agree to license your contribution under the terms | ||||
| of the [Apache License](LICENSE). | ||||
|  | ||||
| If you are adding a new file it should have a header like below.  The easiest | ||||
| way to add such header is to run `make fmt`. | ||||
|  | ||||
| ``` | ||||
| // Copyright (c) 2017 The Jaeger Authors. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
| ``` | ||||
|  | ||||
| ## Sign your work | ||||
|  | ||||
| The sign-off is a simple line at the end of the explanation for the | ||||
| patch, which certifies that you wrote it or otherwise have the right to | ||||
| pass it on as an open-source patch.  The rules are pretty simple: if you | ||||
| can certify the below (from | ||||
| [developercertificate.org](http://developercertificate.org/)): | ||||
|  | ||||
| ``` | ||||
| Developer Certificate of Origin | ||||
| Version 1.1 | ||||
|  | ||||
| Copyright (C) 2004, 2006 The Linux Foundation and its contributors. | ||||
| 660 York Street, Suite 102, | ||||
| San Francisco, CA 94110 USA | ||||
|  | ||||
| Everyone is permitted to copy and distribute verbatim copies of this | ||||
| license document, but changing it is not allowed. | ||||
|  | ||||
|  | ||||
| Developer's Certificate of Origin 1.1 | ||||
|  | ||||
| By making a contribution to this project, I certify that: | ||||
|  | ||||
| (a) The contribution was created in whole or in part by me and I | ||||
|     have the right to submit it under the open source license | ||||
|     indicated in the file; or | ||||
|  | ||||
| (b) The contribution is based upon previous work that, to the best | ||||
|     of my knowledge, is covered under an appropriate open source | ||||
|     license and I have the right under that license to submit that | ||||
|     work with modifications, whether created in whole or in part | ||||
|     by me, under the same open source license (unless I am | ||||
|     permitted to submit under a different license), as indicated | ||||
|     in the file; or | ||||
|  | ||||
| (c) The contribution was provided directly to me by some other | ||||
|     person who certified (a), (b) or (c) and I have not modified | ||||
|     it. | ||||
|  | ||||
| (d) I understand and agree that this project and the contribution | ||||
|     are public and that a record of the contribution (including all | ||||
|     personal information I submit with it, including my sign-off) is | ||||
|     maintained indefinitely and may be redistributed consistent with | ||||
|     this project or the open source license(s) involved. | ||||
| ``` | ||||
|  | ||||
| then you just add a line to every git commit message: | ||||
|  | ||||
|     Signed-off-by: Joe Smith <joe@gmail.com> | ||||
|  | ||||
| using your real name (sorry, no pseudonyms or anonymous contributions). | ||||
|  | ||||
| You can add the sign off when creating the git commit via `git commit -s`. | ||||
|  | ||||
| If you want this to be automatic you can set up some aliases: | ||||
|  | ||||
| ``` | ||||
| git config --add alias.amend "commit -s --amend" | ||||
| git config --add alias.c "commit -s" | ||||
| ``` | ||||
							
								
								
									
37 vendor/github.com/uber/jaeger-client-go/DCO generated vendored Normal file
| @@ -0,0 +1,37 @@ | ||||
| Developer Certificate of Origin | ||||
| Version 1.1 | ||||
|  | ||||
| Copyright (C) 2004, 2006 The Linux Foundation and its contributors. | ||||
| 660 York Street, Suite 102, | ||||
| San Francisco, CA 94110 USA | ||||
|  | ||||
| Everyone is permitted to copy and distribute verbatim copies of this | ||||
| license document, but changing it is not allowed. | ||||
|  | ||||
|  | ||||
| Developer's Certificate of Origin 1.1 | ||||
|  | ||||
| By making a contribution to this project, I certify that: | ||||
|  | ||||
| (a) The contribution was created in whole or in part by me and I | ||||
|     have the right to submit it under the open source license | ||||
|     indicated in the file; or | ||||
|  | ||||
| (b) The contribution is based upon previous work that, to the best | ||||
|     of my knowledge, is covered under an appropriate open source | ||||
|     license and I have the right under that license to submit that | ||||
|     work with modifications, whether created in whole or in part | ||||
|     by me, under the same open source license (unless I am | ||||
|     permitted to submit under a different license), as indicated | ||||
|     in the file; or | ||||
|  | ||||
| (c) The contribution was provided directly to me by some other | ||||
|     person who certified (a), (b) or (c) and I have not modified | ||||
|     it. | ||||
|  | ||||
| (d) I understand and agree that this project and the contribution | ||||
|     are public and that a record of the contribution (including all | ||||
|     personal information I submit with it, including my sign-off) is | ||||
|     maintained indefinitely and may be redistributed consistent with | ||||
|     this project or the open source license(s) involved. | ||||
|  | ||||
							
								
								
									
164 vendor/github.com/uber/jaeger-client-go/Gopkg.lock generated vendored Normal file
| @@ -0,0 +1,164 @@ | ||||
| # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. | ||||
|  | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/beorn7/perks" | ||||
|   packages = ["quantile"] | ||||
|   revision = "3a771d992973f24aa725d07868b467d1ddfceafb" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/codahale/hdrhistogram" | ||||
|   packages = ["."] | ||||
|   revision = "3a0bb77429bd3a61596f5e8a3172445844342120" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/crossdock/crossdock-go" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "assert", | ||||
|     "require" | ||||
|   ] | ||||
|   revision = "049aabb0122b03bc9bd30cab8f3f91fb60166361" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/davecgh/go-spew" | ||||
|   packages = ["spew"] | ||||
|   revision = "346938d642f2ec3594ed81d874461961cd0faa76" | ||||
|   version = "v1.1.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/golang/protobuf" | ||||
|   packages = ["proto"] | ||||
|   revision = "925541529c1fa6821df4e44ce2723319eb2be768" | ||||
|   version = "v1.0.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/matttproud/golang_protobuf_extensions" | ||||
|   packages = ["pbutil"] | ||||
|   revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" | ||||
|   version = "v1.0.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/opentracing/opentracing-go" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "ext", | ||||
|     "log" | ||||
|   ] | ||||
|   revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" | ||||
|   version = "v1.0.2" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/pkg/errors" | ||||
|   packages = ["."] | ||||
|   revision = "645ef00459ed84a119197bfb8d8205042c6df63d" | ||||
|   version = "v0.8.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/pmezard/go-difflib" | ||||
|   packages = ["difflib"] | ||||
|   revision = "792786c7400a136282c1664665ae0a8db921c6c2" | ||||
|   version = "v1.0.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/prometheus/client_golang" | ||||
|   packages = ["prometheus"] | ||||
|   revision = "c5b7fccd204277076155f10851dad72b76a49317" | ||||
|   version = "v0.8.0" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/prometheus/client_model" | ||||
|   packages = ["go"] | ||||
|   revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/prometheus/common" | ||||
|   packages = [ | ||||
|     "expfmt", | ||||
|     "internal/bitbucket.org/ww/goautoneg", | ||||
|     "model" | ||||
|   ] | ||||
|   revision = "d811d2e9bf898806ecfb6ef6296774b13ffc314c" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "github.com/prometheus/procfs" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "internal/util", | ||||
|     "nfs", | ||||
|     "xfs" | ||||
|   ] | ||||
|   revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/stretchr/testify" | ||||
|   packages = [ | ||||
|     "assert", | ||||
|     "require", | ||||
|     "suite" | ||||
|   ] | ||||
|   revision = "12b6f73e6084dad08a7c6e575284b177ecafbc71" | ||||
|   version = "v1.2.1" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/uber-go/atomic" | ||||
|   packages = ["."] | ||||
|   revision = "8474b86a5a6f79c443ce4b2992817ff32cf208b8" | ||||
|   version = "v1.3.1" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "github.com/uber/jaeger-lib" | ||||
|   packages = [ | ||||
|     "metrics", | ||||
|     "metrics/prometheus", | ||||
|     "metrics/testutils" | ||||
|   ] | ||||
|   revision = "4267858c0679cd4e47cefed8d7f70fd386cfb567" | ||||
|   version = "v1.4.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "go.uber.org/atomic" | ||||
|   packages = ["."] | ||||
|   revision = "54f72d32435d760d5604f17a82e2435b28dc4ba5" | ||||
|   version = "v1.3.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "go.uber.org/multierr" | ||||
|   packages = ["."] | ||||
|   revision = "3c4937480c32f4c13a875a1829af76c98ca3d40a" | ||||
|   version = "v1.1.0" | ||||
|  | ||||
| [[projects]] | ||||
|   name = "go.uber.org/zap" | ||||
|   packages = [ | ||||
|     ".", | ||||
|     "buffer", | ||||
|     "internal/bufferpool", | ||||
|     "internal/color", | ||||
|     "internal/exit", | ||||
|     "zapcore" | ||||
|   ] | ||||
|   revision = "eeedf312bc6c57391d84767a4cd413f02a917974" | ||||
|   version = "v1.8.0" | ||||
|  | ||||
| [[projects]] | ||||
|   branch = "master" | ||||
|   name = "golang.org/x/net" | ||||
|   packages = [ | ||||
|     "context", | ||||
|     "context/ctxhttp" | ||||
|   ] | ||||
|   revision = "5f9ae10d9af5b1c89ae6904293b14b064d4ada23" | ||||
|  | ||||
| [solve-meta] | ||||
|   analyzer-name = "dep" | ||||
|   analyzer-version = 1 | ||||
|   inputs-digest = "f9dcfaf37a785c5dac1e20c29605eda29a83ba9c6f8842e92960dc94c8c4ff80" | ||||
|   solver-name = "gps-cdcl" | ||||
|   solver-version = 1 | ||||
							
								
								
									
27 vendor/github.com/uber/jaeger-client-go/Gopkg.toml generated vendored Normal file
| @@ -0,0 +1,27 @@ | ||||
| [[constraint]] | ||||
|   name = "github.com/crossdock/crossdock-go" | ||||
|   branch = "master" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/opentracing/opentracing-go" | ||||
|   version = "^1" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/prometheus/client_golang" | ||||
|   version = "0.8.0" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/stretchr/testify" | ||||
|   version = "^1.1.3" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/uber-go/atomic" | ||||
|   version = "^1" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "github.com/uber/jaeger-lib" | ||||
|   version = "^1.3" | ||||
|  | ||||
| [[constraint]] | ||||
|   name = "go.uber.org/zap" | ||||
|   version = "^1" | ||||
							
								
								
									
201 vendor/github.com/uber/jaeger-client-go/LICENSE generated vendored Normal file
| @@ -0,0 +1,201 @@ | ||||
|                                  Apache License | ||||
|                            Version 2.0, January 2004 | ||||
|                         http://www.apache.org/licenses/ | ||||
|  | ||||
|    TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION | ||||
|  | ||||
|    1. Definitions. | ||||
|  | ||||
|       "License" shall mean the terms and conditions for use, reproduction, | ||||
|       and distribution as defined by Sections 1 through 9 of this document. | ||||
|  | ||||
|       "Licensor" shall mean the copyright owner or entity authorized by | ||||
|       the copyright owner that is granting the License. | ||||
|  | ||||
|       "Legal Entity" shall mean the union of the acting entity and all | ||||
|       other entities that control, are controlled by, or are under common | ||||
|       control with that entity. For the purposes of this definition, | ||||
|       "control" means (i) the power, direct or indirect, to cause the | ||||
|       direction or management of such entity, whether by contract or | ||||
|       otherwise, or (ii) ownership of fifty percent (50%) or more of the | ||||
|       outstanding shares, or (iii) beneficial ownership of such entity. | ||||
|  | ||||
|       "You" (or "Your") shall mean an individual or Legal Entity | ||||
|       exercising permissions granted by this License. | ||||
|  | ||||
|       "Source" form shall mean the preferred form for making modifications, | ||||
|       including but not limited to software source code, documentation | ||||
|       source, and configuration files. | ||||
|  | ||||
|       "Object" form shall mean any form resulting from mechanical | ||||
|       transformation or translation of a Source form, including but | ||||
|       not limited to compiled object code, generated documentation, | ||||
|       and conversions to other media types. | ||||
|  | ||||
|       "Work" shall mean the work of authorship, whether in Source or | ||||
|       Object form, made available under the License, as indicated by a | ||||
|       copyright notice that is included in or attached to the work | ||||
|       (an example is provided in the Appendix below). | ||||
|  | ||||
|       "Derivative Works" shall mean any work, whether in Source or Object | ||||
|       form, that is based on (or derived from) the Work and for which the | ||||
|       editorial revisions, annotations, elaborations, or other modifications | ||||
|       represent, as a whole, an original work of authorship. For the purposes | ||||
|       of this License, Derivative Works shall not include works that remain | ||||
|       separable from, or merely link (or bind by name) to the interfaces of, | ||||
|       the Work and Derivative Works thereof. | ||||
|  | ||||
|       "Contribution" shall mean any work of authorship, including | ||||
|       the original version of the Work and any modifications or additions | ||||
|       to that Work or Derivative Works thereof, that is intentionally | ||||
|       submitted to Licensor for inclusion in the Work by the copyright owner | ||||
|       or by an individual or Legal Entity authorized to submit on behalf of | ||||
|       the copyright owner. For the purposes of this definition, "submitted" | ||||
|       means any form of electronic, verbal, or written communication sent | ||||
|       to the Licensor or its representatives, including but not limited to | ||||
|       communication on electronic mailing lists, source code control systems, | ||||
|       and issue tracking systems that are managed by, or on behalf of, the | ||||
|       Licensor for the purpose of discussing and improving the Work, but | ||||
|       excluding communication that is conspicuously marked or otherwise | ||||
|       designated in writing by the copyright owner as "Not a Contribution." | ||||
|  | ||||
|       "Contributor" shall mean Licensor and any individual or Legal Entity | ||||
|       on behalf of whom a Contribution has been received by Licensor and | ||||
|       subsequently incorporated within the Work. | ||||
|  | ||||
|    2. Grant of Copyright License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       copyright license to reproduce, prepare Derivative Works of, | ||||
|       publicly display, publicly perform, sublicense, and distribute the | ||||
|       Work and such Derivative Works in Source or Object form. | ||||
|  | ||||
|    3. Grant of Patent License. Subject to the terms and conditions of | ||||
|       this License, each Contributor hereby grants to You a perpetual, | ||||
|       worldwide, non-exclusive, no-charge, royalty-free, irrevocable | ||||
|       (except as stated in this section) patent license to make, have made, | ||||
|       use, offer to sell, sell, import, and otherwise transfer the Work, | ||||
|       where such license applies only to those patent claims licensable | ||||
|       by such Contributor that are necessarily infringed by their | ||||
|       Contribution(s) alone or by combination of their Contribution(s) | ||||
|       with the Work to which such Contribution(s) was submitted. If You | ||||
|       institute patent litigation against any entity (including a | ||||
|       cross-claim or counterclaim in a lawsuit) alleging that the Work | ||||
|       or a Contribution incorporated within the Work constitutes direct | ||||
|       or contributory patent infringement, then any patent licenses | ||||
|       granted to You under this License for that Work shall terminate | ||||
|       as of the date such litigation is filed. | ||||
|  | ||||
|    4. Redistribution. You may reproduce and distribute copies of the | ||||
|       Work or Derivative Works thereof in any medium, with or without | ||||
|       modifications, and in Source or Object form, provided that You | ||||
|       meet the following conditions: | ||||
|  | ||||
|       (a) You must give any other recipients of the Work or | ||||
|           Derivative Works a copy of this License; and | ||||
|  | ||||
|       (b) You must cause any modified files to carry prominent notices | ||||
|           stating that You changed the files; and | ||||
|  | ||||
|       (c) You must retain, in the Source form of any Derivative Works | ||||
|           that You distribute, all copyright, patent, trademark, and | ||||
|           attribution notices from the Source form of the Work, | ||||
|           excluding those notices that do not pertain to any part of | ||||
|           the Derivative Works; and | ||||
|  | ||||
|       (d) If the Work includes a "NOTICE" text file as part of its | ||||
|           distribution, then any Derivative Works that You distribute must | ||||
|           include a readable copy of the attribution notices contained | ||||
|           within such NOTICE file, excluding those notices that do not | ||||
|           pertain to any part of the Derivative Works, in at least one | ||||
|           of the following places: within a NOTICE text file distributed | ||||
|           as part of the Derivative Works; within the Source form or | ||||
|           documentation, if provided along with the Derivative Works; or, | ||||
|           within a display generated by the Derivative Works, if and | ||||
|           wherever such third-party notices normally appear. The contents | ||||
|           of the NOTICE file are for informational purposes only and | ||||
|           do not modify the License. You may add Your own attribution | ||||
|           notices within Derivative Works that You distribute, alongside | ||||
|           or as an addendum to the NOTICE text from the Work, provided | ||||
|           that such additional attribution notices cannot be construed | ||||
|           as modifying the License. | ||||
|  | ||||
|       You may add Your own copyright statement to Your modifications and | ||||
|       may provide additional or different license terms and conditions | ||||
|       for use, reproduction, or distribution of Your modifications, or | ||||
|       for any such Derivative Works as a whole, provided Your use, | ||||
|       reproduction, and distribution of the Work otherwise complies with | ||||
|       the conditions stated in this License. | ||||
|  | ||||
|    5. Submission of Contributions. Unless You explicitly state otherwise, | ||||
|       any Contribution intentionally submitted for inclusion in the Work | ||||
|       by You to the Licensor shall be under the terms and conditions of | ||||
|       this License, without any additional terms or conditions. | ||||
|       Notwithstanding the above, nothing herein shall supersede or modify | ||||
|       the terms of any separate license agreement you may have executed | ||||
|       with Licensor regarding such Contributions. | ||||
|  | ||||
|    6. Trademarks. This License does not grant permission to use the trade | ||||
|       names, trademarks, service marks, or product names of the Licensor, | ||||
|       except as required for reasonable and customary use in describing the | ||||
|       origin of the Work and reproducing the content of the NOTICE file. | ||||
|  | ||||
|    7. Disclaimer of Warranty. Unless required by applicable law or | ||||
|       agreed to in writing, Licensor provides the Work (and each | ||||
|       Contributor provides its Contributions) on an "AS IS" BASIS, | ||||
|       WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or | ||||
|       implied, including, without limitation, any warranties or conditions | ||||
|       of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A | ||||
|       PARTICULAR PURPOSE. You are solely responsible for determining the | ||||
|       appropriateness of using or redistributing the Work and assume any | ||||
|       risks associated with Your exercise of permissions under this License. | ||||
|  | ||||
|    8. Limitation of Liability. In no event and under no legal theory, | ||||
|       whether in tort (including negligence), contract, or otherwise, | ||||
|       unless required by applicable law (such as deliberate and grossly | ||||
|       negligent acts) or agreed to in writing, shall any Contributor be | ||||
|       liable to You for damages, including any direct, indirect, special, | ||||
|       incidental, or consequential damages of any character arising as a | ||||
|       result of this License or out of the use or inability to use the | ||||
|       Work (including but not limited to damages for loss of goodwill, | ||||
|       work stoppage, computer failure or malfunction, or any and all | ||||
|       other commercial damages or losses), even if such Contributor | ||||
|       has been advised of the possibility of such damages. | ||||
|  | ||||
|    9. Accepting Warranty or Additional Liability. While redistributing | ||||
|       the Work or Derivative Works thereof, You may choose to offer, | ||||
|       and charge a fee for, acceptance of support, warranty, indemnity, | ||||
|       or other liability obligations and/or rights consistent with this | ||||
|       License. However, in accepting such obligations, You may act only | ||||
|       on Your own behalf and on Your sole responsibility, not on behalf | ||||
|       of any other Contributor, and only if You agree to indemnify, | ||||
|       defend, and hold each Contributor harmless for any liability | ||||
|       incurred by, or claims asserted against, such Contributor by reason | ||||
|       of your accepting any such warranty or additional liability. | ||||
|  | ||||
|    END OF TERMS AND CONDITIONS | ||||
|  | ||||
|    APPENDIX: How to apply the Apache License to your work. | ||||
|  | ||||
|       To apply the Apache License to your work, attach the following | ||||
|       boilerplate notice, with the fields enclosed by brackets "[]" | ||||
|       replaced with your own identifying information. (Don't include | ||||
|       the brackets!)  The text should be enclosed in the appropriate | ||||
|       comment syntax for the file format. We also recommend that a | ||||
|       file or class name and description of purpose be included on the | ||||
|       same "printed page" as the copyright notice for easier | ||||
|       identification within third-party archives. | ||||
|  | ||||
|    Copyright [yyyy] [name of copyright owner] | ||||
|  | ||||
|    Licensed under the Apache License, Version 2.0 (the "License"); | ||||
|    you may not use this file except in compliance with the License. | ||||
|    You may obtain a copy of the License at | ||||
|  | ||||
|        http://www.apache.org/licenses/LICENSE-2.0 | ||||
|  | ||||
|    Unless required by applicable law or agreed to in writing, software | ||||
|    distributed under the License is distributed on an "AS IS" BASIS, | ||||
|    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
|    See the License for the specific language governing permissions and | ||||
|    limitations under the License. | ||||
							
								
								
									
117 vendor/github.com/uber/jaeger-client-go/Makefile generated vendored Normal file
| @@ -0,0 +1,117 @@ | ||||
| PROJECT_ROOT=github.com/uber/jaeger-client-go | ||||
| PACKAGES := $(shell glide novendor | grep -v -e ./thrift-gen/... -e ./thrift/...) | ||||
| # all .go files that don't exist in hidden directories | ||||
| ALL_SRC := $(shell find . -name "*.go" | grep -v -e vendor -e thrift-gen -e ./thrift/ \ | ||||
|         -e ".*/\..*" \ | ||||
|         -e ".*/_.*" \ | ||||
|         -e ".*/mocks.*") | ||||
|  | ||||
| -include crossdock/rules.mk | ||||
|  | ||||
| export GO15VENDOREXPERIMENT=1 | ||||
|  | ||||
| RACE=-race | ||||
| GOTEST=go test -v $(RACE) | ||||
| GOLINT=golint | ||||
| GOVET=go vet | ||||
| GOFMT=gofmt | ||||
| FMT_LOG=fmt.log | ||||
| LINT_LOG=lint.log | ||||
|  | ||||
| THRIFT_VER=0.9.3 | ||||
| THRIFT_IMG=thrift:$(THRIFT_VER) | ||||
| THRIFT=docker run -v "${PWD}:/data" $(THRIFT_IMG) thrift | ||||
| THRIFT_GO_ARGS=thrift_import="github.com/apache/thrift/lib/go/thrift" | ||||
| THRIFT_GEN_DIR=thrift-gen | ||||
|  | ||||
| PASS=$(shell printf "\033[32mPASS\033[0m") | ||||
| FAIL=$(shell printf "\033[31mFAIL\033[0m") | ||||
| COLORIZE=sed ''/PASS/s//$(PASS)/'' | sed ''/FAIL/s//$(FAIL)/'' | ||||
|  | ||||
| .DEFAULT_GOAL := test-and-lint | ||||
|  | ||||
| .PHONY: test-and-lint | ||||
| test-and-lint: test fmt lint | ||||
|  | ||||
| .PHONY: test | ||||
| test: | ||||
| 	bash -c "set -e; set -o pipefail; $(GOTEST) $(PACKAGES) | $(COLORIZE)" | ||||
|  | ||||
| .PHONY: fmt | ||||
| fmt: | ||||
| 	$(GOFMT) -e -s -l -w $(ALL_SRC) | ||||
| 	./scripts/updateLicenses.sh | ||||
|  | ||||
| .PHONY: lint | ||||
| lint: | ||||
| 	$(GOVET) $(PACKAGES) | ||||
| 	@cat /dev/null > $(LINT_LOG) | ||||
| 	@$(foreach pkg, $(PACKAGES), $(GOLINT) $(pkg) | grep -v crossdock/thrift >> $(LINT_LOG) || true;) | ||||
| 	@[ ! -s "$(LINT_LOG)" ] || (echo "Lint Failures" | cat - $(LINT_LOG) && false) | ||||
| 	@$(GOFMT) -e -s -l $(ALL_SRC) > $(FMT_LOG) | ||||
| 	./scripts/updateLicenses.sh >> $(FMT_LOG) | ||||
| 	@[ ! -s "$(FMT_LOG)" ] || (echo "go fmt or license check failures, run 'make fmt'" | cat - $(FMT_LOG) && false) | ||||
|  | ||||
|  | ||||
| .PHONY: install | ||||
| install: | ||||
| 	glide --version || go get github.com/Masterminds/glide | ||||
| ifeq ($(USE_DEP),true) | ||||
| 	dep ensure | ||||
| else | ||||
| 	glide install | ||||
| endif | ||||
|  | ||||
|  | ||||
| .PHONY: cover | ||||
| cover: | ||||
| 	./scripts/cover.sh $(shell go list $(PACKAGES)) | ||||
| 	go tool cover -html=cover.out -o cover.html | ||||
|  | ||||
|  | ||||
| # This is not part of the regular test target because we don't want to slow it | ||||
| # down. | ||||
| .PHONY: test-examples | ||||
| test-examples: | ||||
| 	make -C examples | ||||
|  | ||||
| # TODO at the moment we're not generating tchan_*.go files | ||||
| thrift: idl-submodule thrift-image | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/agent.thrift | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/sampling.thrift | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/jaeger.thrift | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/zipkincore.thrift | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/$(THRIFT_GEN_DIR) /data/idl/thrift/baggage.thrift | ||||
| 	$(THRIFT) -o /data --gen go:$(THRIFT_GO_ARGS) --out /data/crossdock/thrift/ /data/idl/thrift/crossdock/tracetest.thrift | ||||
| 	sed -i '' 's|"zipkincore"|"$(PROJECT_ROOT)/thrift-gen/zipkincore"|g' $(THRIFT_GEN_DIR)/agent/*.go | ||||
| 	sed -i '' 's|"jaeger"|"$(PROJECT_ROOT)/thrift-gen/jaeger"|g' $(THRIFT_GEN_DIR)/agent/*.go | ||||
| 	sed -i '' 's|"github.com/apache/thrift/lib/go/thrift"|"github.com/uber/jaeger-client-go/thrift"|g' \ | ||||
| 		$(THRIFT_GEN_DIR)/*/*.go crossdock/thrift/tracetest/*.go | ||||
| 	rm -rf thrift-gen/*/*-remote | ||||
| 	rm -rf crossdock/thrift/*/*-remote | ||||
| 	rm -rf thrift-gen/jaeger/collector.go | ||||
|  | ||||
| idl-submodule: | ||||
| 	git submodule init | ||||
| 	git submodule update | ||||
|  | ||||
| thrift-image: | ||||
| 	$(THRIFT) -version | ||||
|  | ||||
| .PHONY: install-dep-ci | ||||
| install-dep-ci: | ||||
| 	- curl -L -s https://github.com/golang/dep/releases/download/v0.3.2/dep-linux-amd64 -o $$GOPATH/bin/dep | ||||
| 	- chmod +x $$GOPATH/bin/dep | ||||
|  | ||||
| .PHONY: install-ci | ||||
| install-ci: install-dep-ci install | ||||
| 	go get github.com/wadey/gocovmerge | ||||
| 	go get github.com/mattn/goveralls | ||||
| 	go get golang.org/x/tools/cmd/cover | ||||
| 	go get github.com/golang/lint/golint | ||||
|  | ||||
| .PHONY: test-ci | ||||
| test-ci: | ||||
| 	@./scripts/cover.sh $(shell go list $(PACKAGES)) | ||||
| 	make lint | ||||
|  | ||||
							
								
								
									
260 vendor/github.com/uber/jaeger-client-go/README.md generated vendored Normal file
| @@ -0,0 +1,260 @@ | ||||
| [![GoDoc][doc-img]][doc] [![Build Status][ci-img]][ci] [![Coverage Status][cov-img]][cov] [![OpenTracing 1.0 Enabled][ot-img]][ot-url] | ||||
|  | ||||
| # Jaeger Bindings for Go OpenTracing API | ||||
|  | ||||
| Instrumentation library that implements an | ||||
| [OpenTracing](http://opentracing.io) Tracer for Jaeger (https://jaegertracing.io). | ||||
|  | ||||
| **IMPORTANT**: The library's import path is based on its original location under `github.com/uber`. Do not try to import it as `github.com/jaegertracing`; it will not compile. We might revisit this in the next major release. | ||||
|   * :white_check_mark: `import "github.com/uber/jaeger-client-go"` | ||||
|   * :x: `import "github.com/jaegertracing/jaeger-client-go"` | ||||
|  | ||||
| ## How to Contribute | ||||
|  | ||||
| Please see [CONTRIBUTING.md](CONTRIBUTING.md). | ||||
|  | ||||
| ## Installation | ||||
|  | ||||
| We recommend using a dependency manager like [glide](https://github.com/Masterminds/glide) | ||||
| and [semantic versioning](http://semver.org/) when including this library into an application. | ||||
| For example, Jaeger backend imports this library like this: | ||||
|  | ||||
| ```yaml | ||||
| - package: github.com/uber/jaeger-client-go | ||||
|   version: ^2.7.0 | ||||
| ``` | ||||
|  | ||||
| If you instead want to use the latest version in `master`, you can pull it via `go get`. | ||||
| Note that during `go get` you may see build errors due to incompatible dependencies, which is why | ||||
| we recommend using semantic versions for dependencies. The error may be fixed by running | ||||
| `make install` (it will install `glide` if you don't have it): | ||||
|  | ||||
| ```shell | ||||
| go get -u github.com/uber/jaeger-client-go/ | ||||
| cd $GOPATH/src/github.com/uber/jaeger-client-go/ | ||||
| git submodule update --init --recursive | ||||
| make install | ||||
| ``` | ||||
|  | ||||
| ## Initialization | ||||
|  | ||||
| See tracer initialization examples in [godoc](https://godoc.org/github.com/uber/jaeger-client-go/config#pkg-examples) | ||||
| and [config/example_test.go](./config/example_test.go). | ||||
|  | ||||
| ### Environment variables | ||||
|  | ||||
| The tracer can be initialized with values coming from environment variables. None of the env vars are required | ||||
| and all of them can be overridden by setting the corresponding property directly on the configuration object. | ||||
|  | ||||
| Property| Description | ||||
| --- | --- | ||||
| JAEGER_SERVICE_NAME | The service name | ||||
| JAEGER_AGENT_HOST | The hostname for communicating with agent via UDP | ||||
| JAEGER_AGENT_PORT | The port for communicating with agent via UDP | ||||
| JAEGER_REPORTER_LOG_SPANS | Whether the reporter should also log the spans | ||||
| JAEGER_REPORTER_MAX_QUEUE_SIZE | The reporter's maximum queue size | ||||
| JAEGER_REPORTER_FLUSH_INTERVAL | The reporter's flush interval (ms) | ||||
| JAEGER_SAMPLER_TYPE | The sampler type | ||||
| JAEGER_SAMPLER_PARAM | The sampler parameter (number) | ||||
| JAEGER_SAMPLER_MANAGER_HOST_PORT | The host name and port when using the remote controlled sampler | ||||
| JAEGER_SAMPLER_MAX_OPERATIONS | The maximum number of operations that the sampler will keep track of | ||||
| JAEGER_SAMPLER_REFRESH_INTERVAL | How often the remotely controlled sampler will poll jaeger-agent for the appropriate sampling strategy | ||||
| JAEGER_TAGS | A comma separated list of `name = value` tracer level tags, which get added to all reported spans. The value can also refer to an environment variable using the format `${envVarName:default}`, where the `:default` is optional, and identifies a value to be used if the environment variable cannot be found | ||||
| JAEGER_DISABLED | Whether the tracer is disabled or not. If true, the default `opentracing.NoopTracer` is used. | ||||
| JAEGER_RPC_METRICS | Whether to store RPC metrics | ||||
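
To make the table concrete, here is a minimal sketch of env-driven initialization. It assumes the `config.FromEnv()` helper that accompanied the env-var support noted in the 2.13.0 changelog entry above; the fallback service name is illustrative.

```go
// A sketch only: build the tracer entirely from JAEGER_* environment
// variables, assuming config.FromEnv is available in the vendored version.
package main

import (
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	jaegercfg "github.com/uber/jaeger-client-go/config"
)

func main() {
	cfg, err := jaegercfg.FromEnv()
	if err != nil {
		log.Fatalf("could not read JAEGER_* environment variables: %v", err)
	}
	if cfg.ServiceName == "" {
		cfg.ServiceName = "example-service" // used only if JAEGER_SERVICE_NAME is unset
	}

	tracer, closer, err := cfg.NewTracer()
	if err != nil {
		log.Fatalf("could not initialize tracer: %v", err)
	}
	defer closer.Close()

	opentracing.SetGlobalTracer(tracer)
	// ... application code ...
}
```
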
|  | ||||
| ### Closing the tracer via `io.Closer` | ||||
|  | ||||
| The constructor function for Jaeger Tracer returns the tracer itself and an `io.Closer` instance. | ||||
| It is recommended to structure your `main()` so that it calls the `Close()` function on the closer | ||||
| before exiting, e.g. | ||||
|  | ||||
| ```go | ||||
| tracer, closer, err := cfg.NewTracer(...) | ||||
| defer closer.Close() | ||||
| ``` | ||||
|  | ||||
| This is especially useful for command-line tools that enable tracing, as well as | ||||
| for long-running apps that support graceful shutdown. For example, if your deployment | ||||
| system sends SIGTERM instead of killing the process and you trap that signal to do a graceful | ||||
| exit, then having `defer closer.Close()` ensures that all buffered spans are flushed. | ||||
|  | ||||
| ### Metrics & Monitoring | ||||
|  | ||||
| The tracer emits a number of different metrics, defined in | ||||
| [metrics.go](metrics.go). The monitoring backend is expected to support | ||||
| tag-based metric names, e.g. instead of `statsd`-style string names | ||||
| like `counters.my-service.jaeger.spans.started.sampled`, the metrics | ||||
| are defined by a short name and a collection of key/value tags, for | ||||
| example: `name:jaeger.traces, state:started, sampled:y`. See [metrics.go](./metrics.go) | ||||
| file for the full list and descriptions of emitted metrics. | ||||
|  | ||||
| The monitoring backend is represented by the `metrics.Factory` interface from package | ||||
| [`"github.com/uber/jaeger-lib/metrics"`](https://github.com/jaegertracing/jaeger-lib/tree/master/metrics).  An implementation | ||||
| of that interface can be passed as an option to either the Configuration object or the Tracer | ||||
| constructor, for example: | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
|     "github.com/uber/jaeger-lib/metrics/prometheus" | ||||
| ) | ||||
|  | ||||
| metricsFactory := prometheus.New() | ||||
| tracer, closer, err := config.Configuration{ | ||||
|     ServiceName: "your-service-name", | ||||
| }.NewTracer( | ||||
|     config.Metrics(metricsFactory), | ||||
| ) | ||||
| ``` | ||||
|  | ||||
| By default, a no-op `metrics.NullFactory` is used. | ||||
|  | ||||
| ### Logging | ||||
|  | ||||
| The tracer can be configured with an optional logger, which will be | ||||
| used to log communication errors, or log spans if a logging reporter | ||||
| option is specified in the configuration. The logging API is abstracted | ||||
| by the [Logger](logger.go) interface. A logger instance implementing | ||||
| this interface can be set on the `Config` object before calling the | ||||
| `New` method. | ||||
|  | ||||
| Besides the [zap](https://github.com/uber-go/zap) implementation | ||||
| bundled with this package there is also a [go-kit](https://github.com/go-kit/kit) | ||||
| one in the [jaeger-lib](https://github.com/jaegertracing/jaeger-lib) repository. | ||||
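|  | ||||
| For example, a logger that writes through the standard library's `log` package can be wired in via the `Logger` configuration option (a sketch; error handling omitted): | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/uber/jaeger-client-go" | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
| ) | ||||
|  | ||||
| cfg := config.Configuration{ | ||||
|     ServiceName: "your-service-name", | ||||
|     Reporter:    &config.ReporterConfig{LogSpans: true}, | ||||
| } | ||||
| // jaeger.StdLogger delegates to the standard library's log package. | ||||
| tracer, closer, err := cfg.NewTracer(config.Logger(jaeger.StdLogger)) | ||||
| ``` | ||||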
|  | ||||
| ## Instrumentation for Tracing | ||||
|  | ||||
| Since this tracer is fully compliant with OpenTracing API 1.0, | ||||
| all code instrumentation should only use the API itself, as described | ||||
| in the [opentracing-go](https://github.com/opentracing/opentracing-go) documentation. | ||||
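|  | ||||
| A short sketch of what that looks like in application code (the operation name and tag are illustrative):  | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "context" | ||||
|  | ||||
|     opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| func doWork(ctx context.Context) { | ||||
|     // Uses whatever tracer was registered via opentracing.SetGlobalTracer, | ||||
|     // so this code has no compile-time dependency on Jaeger itself. | ||||
|     span, ctx := opentracing.StartSpanFromContext(ctx, "doWork") | ||||
|     defer span.Finish() | ||||
|  | ||||
|     span.SetTag("component", "worker") | ||||
|     _ = ctx // pass ctx to downstream calls so child spans join this trace | ||||
| } | ||||
| ``` | ||||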
|  | ||||
| ## Features | ||||
|  | ||||
| ### Reporters | ||||
|  | ||||
| A "reporter" is a component that receives the finished spans and reports | ||||
| them to somewhere. Under normal circumstances, the Tracer | ||||
| should use the default `RemoteReporter`, which sends the spans out of | ||||
| process via configurable "transport". For testing purposes, one can | ||||
| use an `InMemoryReporter` that accumulates spans in a buffer and | ||||
| allows to retrieve them for later verification. Also available are | ||||
| `NullReporter`, a no-op reporter that does nothing, a `LoggingReporter` | ||||
| which logs all finished spans using their `String()` method, and a | ||||
| `CompositeReporter` that can be used to combine more than one reporter | ||||
| into one, e.g. to attach a logging reporter to the main remote reporter. | ||||
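|  | ||||
| In tests, for example, the default reporter can be swapped out through the `Reporter` configuration option (a sketch): | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/uber/jaeger-client-go" | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
| ) | ||||
|  | ||||
| reporter := jaeger.NewInMemoryReporter() | ||||
| cfg := config.Configuration{ServiceName: "your-service-name"} | ||||
| tracer, closer, err := cfg.NewTracer(config.Reporter(reporter)) | ||||
| // ... exercise the code under test with tracer ... | ||||
| // reporter.GetSpans() then returns the finished spans for verification. | ||||
| ``` | ||||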
|  | ||||
| ### Span Reporting Transports | ||||
|  | ||||
| The remote reporter uses "transports" to actually send the spans out | ||||
| of process. Currently the supported transports include: | ||||
|   * [Jaeger Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/agent.thrift) over UDP or HTTP, | ||||
|   * [Zipkin Thrift](https://github.com/jaegertracing/jaeger-idl/blob/master/thrift/zipkincore.thrift) over HTTP. | ||||
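|  | ||||
| As a sketch, spans can also be sent straight to a collector over HTTP by building the reporter by hand (the collector URL is a placeholder): | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/uber/jaeger-client-go" | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
|     "github.com/uber/jaeger-client-go/transport" | ||||
| ) | ||||
|  | ||||
| // Jaeger Thrift over HTTP, bypassing the local agent. | ||||
| sender := transport.NewHTTPTransport("http://jaeger-collector:14268/api/traces") | ||||
| reporter := jaeger.NewRemoteReporter(sender) | ||||
| cfg := config.Configuration{ServiceName: "your-service-name"} | ||||
| tracer, closer, err := cfg.NewTracer(config.Reporter(reporter)) | ||||
| ``` | ||||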
|  | ||||
| ### Sampling | ||||
|  | ||||
| The tracer does not record all spans, but only those that have the | ||||
| sampling bit set in the `flags`. When a new trace is started and a new | ||||
| unique ID is generated, a sampling decision is made whether this trace | ||||
| should be sampled. The sampling decision is propagated to all downstream | ||||
| calls via the `flags` field of the trace context. The following samplers | ||||
| are available: | ||||
|   1. `RemotelyControlledSampler` uses one of the other simpler samplers | ||||
|      and periodically updates it by polling an external server. This | ||||
|      allows dynamic control of the sampling strategies. | ||||
|   1. `ConstSampler` always makes the same sampling decision for all | ||||
| trace IDs. It can be configured to either sample all traces, or | ||||
|      to sample none. | ||||
|   1. `ProbabilisticSampler` uses a fixed sampling rate as a probability | ||||
|      for a given trace to be sampled. The actual decision is made by | ||||
|      comparing the trace ID with a random number multiplied by the | ||||
|      sampling rate. | ||||
|   1. `RateLimitingSampler` can be used to allow only a certain fixed | ||||
|      number of traces to be sampled per second. | ||||
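|  | ||||
| For example, sampling every trace (handy in development) can be requested through the `Sampler` section of the configuration (a sketch): | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/uber/jaeger-client-go" | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
| ) | ||||
|  | ||||
| cfg := config.Configuration{ | ||||
|     ServiceName: "your-service-name", | ||||
|     Sampler: &config.SamplerConfig{ | ||||
|         Type:  jaeger.SamplerTypeConst, // "const" | ||||
|         Param: 1,                       // 1 samples every trace, 0 samples none | ||||
|     }, | ||||
| } | ||||
| tracer, closer, err := cfg.NewTracer() | ||||
| ``` | ||||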
|  | ||||
| ### Baggage Injection | ||||
|  | ||||
| The OpenTracing spec allows for [baggage][baggage]: key-value pairs that are added | ||||
| to the span context and propagated throughout the trace. An external process can inject baggage | ||||
| by setting the special HTTP Header `jaeger-baggage` on a request: | ||||
|  | ||||
| ```sh | ||||
| curl -H "jaeger-baggage: key1=value1, key2=value2" http://myhost.com | ||||
| ``` | ||||
|  | ||||
| Baggage can also be programmatically set inside your service: | ||||
|  | ||||
| ```go | ||||
| if span := opentracing.SpanFromContext(ctx); span != nil { | ||||
|     span.SetBaggageItem("key", "value") | ||||
| } | ||||
| ``` | ||||
|  | ||||
| A downstream service can retrieve the baggage in a similar way: | ||||
|  | ||||
| ```go | ||||
| if span := opentracing.SpanFromContext(ctx); span != nil { | ||||
|     val := span.BaggageItem("key") | ||||
|     println(val) | ||||
| } | ||||
| ``` | ||||
|  | ||||
| ### Debug Traces (Forced Sampling) | ||||
|  | ||||
| #### Programmatically | ||||
|  | ||||
| The OpenTracing API defines a `sampling.priority` standard tag that | ||||
| can be used to affect the sampling of a span and its children: | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     "github.com/opentracing/opentracing-go" | ||||
|     "github.com/opentracing/opentracing-go/ext" | ||||
| ) | ||||
|  | ||||
| span := opentracing.SpanFromContext(ctx) | ||||
| ext.SamplingPriority.Set(span, 1)     | ||||
| ``` | ||||
|  | ||||
| #### Via HTTP Headers | ||||
|  | ||||
| Jaeger Tracer also understands a special HTTP Header `jaeger-debug-id`, | ||||
| which can be set in the incoming request, e.g. | ||||
|  | ||||
| ```sh | ||||
| curl -H "jaeger-debug-id: some-correlation-id" http://myhost.com | ||||
| ``` | ||||
|  | ||||
| When Jaeger sees this header in the request that otherwise has no | ||||
| tracing context, it ensures that the new trace started for this | ||||
| request will be sampled in the "debug" mode (meaning it should survive | ||||
| all downsampling that might happen in the collection pipeline), and the | ||||
| root span will have a tag as if this statement was executed: | ||||
|  | ||||
| ```go | ||||
| span.SetTag("jaeger-debug-id", "some-correlation-id") | ||||
| ``` | ||||
|  | ||||
| This allows using Jaeger UI to find the trace by this tag. | ||||
|  | ||||
| ### Zipkin HTTP B3 compatible header propagation | ||||
|  | ||||
| Jaeger Tracer supports Zipkin B3 Propagation HTTP headers, which are used | ||||
| by a lot of Zipkin tracers. This means that you can use Jaeger in conjunction with e.g. [these OpenZipkin tracers](https://github.com/openzipkin). | ||||
|  | ||||
| However, it is not the default propagation format; see [here](zipkin/README.md#NewZipkinB3HTTPHeaderPropagator) for how to set it up. | ||||
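|  | ||||
| A sketch of that setup, registering the B3 propagator for HTTP carriers via the injector/extractor options (defer to the linked README for the authoritative instructions): | ||||
|  | ||||
| ```go | ||||
| import ( | ||||
|     opentracing "github.com/opentracing/opentracing-go" | ||||
|     "github.com/uber/jaeger-client-go/config" | ||||
|     "github.com/uber/jaeger-client-go/zipkin" | ||||
| ) | ||||
|  | ||||
| // Propagate trace context using Zipkin's x-b3-* HTTP headers. | ||||
| propagator := zipkin.NewZipkinB3HTTPHeaderPropagator() | ||||
| cfg := config.Configuration{ServiceName: "your-service-name"} | ||||
| tracer, closer, err := cfg.NewTracer( | ||||
|     config.Injector(opentracing.HTTPHeaders, propagator), | ||||
|     config.Extractor(opentracing.HTTPHeaders, propagator), | ||||
| ) | ||||
| ``` | ||||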
|  | ||||
| ## License | ||||
|  | ||||
| [Apache 2.0 License](LICENSE). | ||||
|  | ||||
|  | ||||
| [doc-img]: https://godoc.org/github.com/uber/jaeger-client-go?status.svg | ||||
| [doc]: https://godoc.org/github.com/uber/jaeger-client-go | ||||
| [ci-img]: https://travis-ci.org/jaegertracing/jaeger-client-go.svg?branch=master | ||||
| [ci]: https://travis-ci.org/jaegertracing/jaeger-client-go | ||||
| [cov-img]: https://codecov.io/gh/jaegertracing/jaeger-client-go/branch/master/graph/badge.svg | ||||
| [cov]: https://codecov.io/gh/jaegertracing/jaeger-client-go | ||||
| [ot-img]: https://img.shields.io/badge/OpenTracing--1.0-enabled-blue.svg | ||||
| [ot-url]: http://opentracing.io | ||||
| [baggage]: https://github.com/opentracing/specification/blob/master/specification.md#set-a-baggage-item | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/RELEASE.md (generated, vendored, new file: 11 lines)
							| @@ -0,0 +1,11 @@ | ||||
| # Release Process | ||||
|  | ||||
| 1. Create a PR "Preparing for release X.Y.Z" against master branch | ||||
|     * Alter CHANGELOG.md from `<placeholder_version> (unreleased)` to `<X.Y.Z> (YYYY-MM-DD)` | ||||
|     * Update `JaegerClientVersion` in constants.go to `Go-X.Y.Z` | ||||
| 2. Create a release "Release X.Y.Z" on Github | ||||
|     * Create Tag `vX.Y.Z` | ||||
|     * Copy CHANGELOG.md into the release notes | ||||
| 3. Create a PR "Back to development" against master branch | ||||
|     * Add `<next_version> (unreleased)` to CHANGELOG.md | ||||
|     * Update `JaegerClientVersion` in constants.go to `Go-<next_version>dev` | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/baggage_setter.go (generated, vendored, new file: 77 lines)
							| @@ -0,0 +1,77 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"github.com/opentracing/opentracing-go/log" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go/internal/baggage" | ||||
| ) | ||||
|  | ||||
| // baggageSetter is an actor that can set a baggage value on a Span given certain | ||||
| // restrictions (eg. maxValueLength). | ||||
| type baggageSetter struct { | ||||
| 	restrictionManager baggage.RestrictionManager | ||||
| 	metrics            *Metrics | ||||
| } | ||||
|  | ||||
| func newBaggageSetter(restrictionManager baggage.RestrictionManager, metrics *Metrics) *baggageSetter { | ||||
| 	return &baggageSetter{ | ||||
| 		restrictionManager: restrictionManager, | ||||
| 		metrics:            metrics, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // (NB) span should hold the lock before making this call | ||||
| func (s *baggageSetter) setBaggage(span *Span, key, value string) { | ||||
| 	var truncated bool | ||||
| 	var prevItem string | ||||
| 	restriction := s.restrictionManager.GetRestriction(span.serviceName(), key) | ||||
| 	if !restriction.KeyAllowed() { | ||||
| 		s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) | ||||
| 		s.metrics.BaggageUpdateFailure.Inc(1) | ||||
| 		return | ||||
| 	} | ||||
| 	if len(value) > restriction.MaxValueLength() { | ||||
| 		truncated = true | ||||
| 		value = value[:restriction.MaxValueLength()] | ||||
| 		s.metrics.BaggageTruncate.Inc(1) | ||||
| 	} | ||||
| 	prevItem = span.context.baggage[key] | ||||
| 	s.logFields(span, key, value, prevItem, truncated, restriction.KeyAllowed()) | ||||
| 	span.context = span.context.WithBaggageItem(key, value) | ||||
| 	s.metrics.BaggageUpdateSuccess.Inc(1) | ||||
| } | ||||
|  | ||||
| func (s *baggageSetter) logFields(span *Span, key, value, prevItem string, truncated, valid bool) { | ||||
| 	if !span.context.IsSampled() { | ||||
| 		return | ||||
| 	} | ||||
| 	fields := []log.Field{ | ||||
| 		log.String("event", "baggage"), | ||||
| 		log.String("key", key), | ||||
| 		log.String("value", value), | ||||
| 	} | ||||
| 	if prevItem != "" { | ||||
| 		fields = append(fields, log.String("override", "true")) | ||||
| 	} | ||||
| 	if truncated { | ||||
| 		fields = append(fields, log.String("truncated", "true")) | ||||
| 	} | ||||
| 	if !valid { | ||||
| 		fields = append(fields, log.String("invalid", "true")) | ||||
| 	} | ||||
| 	span.logFieldsNoLocking(fields...) | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/config/config.go (generated, vendored, new file: 373 lines)
							| @@ -0,0 +1,373 @@ | ||||
| // Copyright (c) 2017-2018 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"io" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/opentracing/opentracing-go" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| 	"github.com/uber/jaeger-client-go/internal/baggage/remote" | ||||
| 	throttler "github.com/uber/jaeger-client-go/internal/throttler/remote" | ||||
| 	"github.com/uber/jaeger-client-go/rpcmetrics" | ||||
| ) | ||||
|  | ||||
| const defaultSamplingProbability = 0.001 | ||||
|  | ||||
| // Configuration configures and creates Jaeger Tracer | ||||
| type Configuration struct { | ||||
| 	// ServiceName specifies the service name to use on the tracer. | ||||
| 	// Can be provided via environment variable named JAEGER_SERVICE_NAME | ||||
| 	ServiceName string `yaml:"serviceName"` | ||||
|  | ||||
| 	// Disabled can be provided via environment variable named JAEGER_DISABLED | ||||
| 	Disabled bool `yaml:"disabled"` | ||||
|  | ||||
| 	// RPCMetrics can be provided via environment variable named JAEGER_RPC_METRICS | ||||
| 	RPCMetrics bool `yaml:"rpc_metrics"` | ||||
|  | ||||
| 	// Tags can be provided via environment variable named JAEGER_TAGS | ||||
| 	Tags []opentracing.Tag `yaml:"tags"` | ||||
|  | ||||
| 	Sampler             *SamplerConfig             `yaml:"sampler"` | ||||
| 	Reporter            *ReporterConfig            `yaml:"reporter"` | ||||
| 	Headers             *jaeger.HeadersConfig      `yaml:"headers"` | ||||
| 	BaggageRestrictions *BaggageRestrictionsConfig `yaml:"baggage_restrictions"` | ||||
| 	Throttler           *ThrottlerConfig           `yaml:"throttler"` | ||||
| } | ||||
|  | ||||
| // SamplerConfig allows initializing a non-default sampler.  All fields are optional. | ||||
| type SamplerConfig struct { | ||||
| 	// Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote | ||||
| 	// Can be set by exporting an environment variable named JAEGER_SAMPLER_TYPE | ||||
| 	Type string `yaml:"type"` | ||||
|  | ||||
| 	// Param is a value passed to the sampler. | ||||
| 	// Valid values for Param field are: | ||||
| 	// - for "const" sampler, 0 or 1 for always false/true respectively | ||||
| 	// - for "probabilistic" sampler, a probability between 0 and 1 | ||||
| 	// - for "rateLimiting" sampler, the number of spans per second | ||||
| 	// - for "remote" sampler, param is the same as for "probabilistic" | ||||
| 	//   and indicates the initial sampling rate before the actual one | ||||
| 	//   is received from the mothership. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_SAMPLER_PARAM | ||||
| 	Param float64 `yaml:"param"` | ||||
|  | ||||
| 	// SamplingServerURL is the address of jaeger-agent's HTTP sampling server | ||||
| 	// Can be set by exporting an environment variable named JAEGER_SAMPLER_MANAGER_HOST_PORT | ||||
| 	SamplingServerURL string `yaml:"samplingServerURL"` | ||||
|  | ||||
| 	// MaxOperations is the maximum number of operations that the sampler | ||||
| 	// will keep track of. If an operation is not tracked, a default probabilistic | ||||
| 	// sampler will be used rather than the per operation specific sampler. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_SAMPLER_MAX_OPERATIONS | ||||
| 	MaxOperations int `yaml:"maxOperations"` | ||||
|  | ||||
| 	// SamplingRefreshInterval controls how often the remotely controlled sampler will poll | ||||
| 	// jaeger-agent for the appropriate sampling strategy. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_SAMPLER_REFRESH_INTERVAL | ||||
| 	SamplingRefreshInterval time.Duration `yaml:"samplingRefreshInterval"` | ||||
| } | ||||
|  | ||||
| // ReporterConfig configures the reporter. All fields are optional. | ||||
| type ReporterConfig struct { | ||||
| 	// QueueSize controls how many spans the reporter can keep in memory before it starts dropping | ||||
| 	// new spans. The queue is continuously drained by a background go-routine, as fast as spans | ||||
| 	// can be sent out of process. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_REPORTER_MAX_QUEUE_SIZE | ||||
| 	QueueSize int `yaml:"queueSize"` | ||||
|  | ||||
| 	// BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. | ||||
| 	// It is generally not useful, as it only matters for very low traffic services. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_REPORTER_FLUSH_INTERVAL | ||||
| 	BufferFlushInterval time.Duration | ||||
|  | ||||
| 	// LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter | ||||
| 	// and logs all submitted spans. Main Configuration.Logger must be initialized in the code | ||||
| 	// for this option to have any effect. | ||||
| 	// Can be set by exporting an environment variable named JAEGER_REPORTER_LOG_SPANS | ||||
| 	LogSpans bool `yaml:"logSpans"` | ||||
|  | ||||
| 	// LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address | ||||
| 	// Can be set by exporting an environment variable named JAEGER_AGENT_HOST / JAEGER_AGENT_PORT | ||||
| 	LocalAgentHostPort string `yaml:"localAgentHostPort"` | ||||
| } | ||||
|  | ||||
| // BaggageRestrictionsConfig configures the baggage restrictions manager which can be used to whitelist | ||||
| // certain baggage keys. All fields are optional. | ||||
| type BaggageRestrictionsConfig struct { | ||||
| 	// DenyBaggageOnInitializationFailure controls the startup failure mode of the baggage restriction | ||||
| 	// manager. If true, the manager will not allow any baggage to be written until baggage restrictions have | ||||
| 	// been retrieved from jaeger-agent. If false, the manager will allow any baggage to be written until baggage | ||||
| 	// restrictions have been retrieved from jaeger-agent. | ||||
| 	DenyBaggageOnInitializationFailure bool `yaml:"denyBaggageOnInitializationFailure"` | ||||
|  | ||||
| 	// HostPort is the hostPort of jaeger-agent's baggage restrictions server | ||||
| 	HostPort string `yaml:"hostPort"` | ||||
|  | ||||
| 	// RefreshInterval controls how often the baggage restriction manager will poll | ||||
| 	// jaeger-agent for the most recent baggage restrictions. | ||||
| 	RefreshInterval time.Duration `yaml:"refreshInterval"` | ||||
| } | ||||
|  | ||||
| // ThrottlerConfig configures the throttler which can be used to throttle the | ||||
| // rate at which the client may send debug requests. | ||||
| type ThrottlerConfig struct { | ||||
| 	// HostPort of jaeger-agent's credit server. | ||||
| 	HostPort string `yaml:"hostPort"` | ||||
|  | ||||
| 	// RefreshInterval controls how often the throttler will poll jaeger-agent | ||||
| 	// for more throttling credits. | ||||
| 	RefreshInterval time.Duration `yaml:"refreshInterval"` | ||||
|  | ||||
| 	// SynchronousInitialization determines whether or not the throttler should | ||||
| 	// synchronously fetch credits from the agent when an operation is seen for | ||||
| 	// the first time. This should be set to true if the client will be used by | ||||
| 	// a short lived service that needs to ensure that credits are fetched | ||||
| 	// upfront such that sampling or throttling occurs. | ||||
| 	SynchronousInitialization bool `yaml:"synchronousInitialization"` | ||||
| } | ||||
|  | ||||
| type nullCloser struct{} | ||||
|  | ||||
| func (*nullCloser) Close() error { return nil } | ||||
|  | ||||
| // New creates a new Jaeger Tracer, and a closer func that can be used to flush buffers | ||||
| // before shutdown. | ||||
| // | ||||
| // Deprecated: use NewTracer() function | ||||
| func (c Configuration) New( | ||||
| 	serviceName string, | ||||
| 	options ...Option, | ||||
| ) (opentracing.Tracer, io.Closer, error) { | ||||
| 	if serviceName != "" { | ||||
| 		c.ServiceName = serviceName | ||||
| 	} | ||||
|  | ||||
| 	return c.NewTracer(options...) | ||||
| } | ||||
|  | ||||
| // NewTracer returns a new tracer based on the current configuration, using the given options, | ||||
| // and a closer func that can be used to flush buffers before shutdown. | ||||
| func (c Configuration) NewTracer(options ...Option) (opentracing.Tracer, io.Closer, error) { | ||||
| 	if c.ServiceName == "" { | ||||
| 		return nil, nil, errors.New("no service name provided") | ||||
| 	} | ||||
|  | ||||
| 	if c.Disabled { | ||||
| 		return &opentracing.NoopTracer{}, &nullCloser{}, nil | ||||
| 	} | ||||
| 	opts := applyOptions(options...) | ||||
| 	tracerMetrics := jaeger.NewMetrics(opts.metrics, nil) | ||||
| 	if c.RPCMetrics { | ||||
| 		Observer( | ||||
| 			rpcmetrics.NewObserver( | ||||
| 				opts.metrics.Namespace("jaeger-rpc", map[string]string{"component": "jaeger"}), | ||||
| 				rpcmetrics.DefaultNameNormalizer, | ||||
| 			), | ||||
| 		)(&opts) // adds to c.observers | ||||
| 	} | ||||
| 	if c.Sampler == nil { | ||||
| 		c.Sampler = &SamplerConfig{ | ||||
| 			Type:  jaeger.SamplerTypeRemote, | ||||
| 			Param: defaultSamplingProbability, | ||||
| 		} | ||||
| 	} | ||||
| 	if c.Reporter == nil { | ||||
| 		c.Reporter = &ReporterConfig{} | ||||
| 	} | ||||
|  | ||||
| 	sampler := opts.sampler | ||||
| 	if sampler == nil { | ||||
| 		s, err := c.Sampler.NewSampler(c.ServiceName, tracerMetrics) | ||||
| 		if err != nil { | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 		sampler = s | ||||
| 	} | ||||
|  | ||||
| 	reporter := opts.reporter | ||||
| 	if reporter == nil { | ||||
| 		r, err := c.Reporter.NewReporter(c.ServiceName, tracerMetrics, opts.logger) | ||||
| 		if err != nil { | ||||
| 			return nil, nil, err | ||||
| 		} | ||||
| 		reporter = r | ||||
| 	} | ||||
|  | ||||
| 	tracerOptions := []jaeger.TracerOption{ | ||||
| 		jaeger.TracerOptions.Metrics(tracerMetrics), | ||||
| 		jaeger.TracerOptions.Logger(opts.logger), | ||||
| 		jaeger.TracerOptions.CustomHeaderKeys(c.Headers), | ||||
| 		jaeger.TracerOptions.Gen128Bit(opts.gen128Bit), | ||||
| 		jaeger.TracerOptions.ZipkinSharedRPCSpan(opts.zipkinSharedRPCSpan), | ||||
| 		jaeger.TracerOptions.MaxTagValueLength(opts.maxTagValueLength), | ||||
| 	} | ||||
|  | ||||
| 	for _, tag := range opts.tags { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) | ||||
| 	} | ||||
|  | ||||
| 	for _, tag := range c.Tags { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Tag(tag.Key, tag.Value)) | ||||
| 	} | ||||
|  | ||||
| 	for _, obs := range opts.observers { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Observer(obs)) | ||||
| 	} | ||||
|  | ||||
| 	for _, cobs := range opts.contribObservers { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.ContribObserver(cobs)) | ||||
| 	} | ||||
|  | ||||
| 	for format, injector := range opts.injectors { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Injector(format, injector)) | ||||
| 	} | ||||
|  | ||||
| 	for format, extractor := range opts.extractors { | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.Extractor(format, extractor)) | ||||
| 	} | ||||
|  | ||||
| 	if c.BaggageRestrictions != nil { | ||||
| 		mgr := remote.NewRestrictionManager( | ||||
| 			c.ServiceName, | ||||
| 			remote.Options.Metrics(tracerMetrics), | ||||
| 			remote.Options.Logger(opts.logger), | ||||
| 			remote.Options.HostPort(c.BaggageRestrictions.HostPort), | ||||
| 			remote.Options.RefreshInterval(c.BaggageRestrictions.RefreshInterval), | ||||
| 			remote.Options.DenyBaggageOnInitializationFailure( | ||||
| 				c.BaggageRestrictions.DenyBaggageOnInitializationFailure, | ||||
| 			), | ||||
| 		) | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.BaggageRestrictionManager(mgr)) | ||||
| 	} | ||||
|  | ||||
| 	if c.Throttler != nil { | ||||
| 		debugThrottler := throttler.NewThrottler( | ||||
| 			c.ServiceName, | ||||
| 			throttler.Options.Metrics(tracerMetrics), | ||||
| 			throttler.Options.Logger(opts.logger), | ||||
| 			throttler.Options.HostPort(c.Throttler.HostPort), | ||||
| 			throttler.Options.RefreshInterval(c.Throttler.RefreshInterval), | ||||
| 			throttler.Options.SynchronousInitialization( | ||||
| 				c.Throttler.SynchronousInitialization, | ||||
| 			), | ||||
| 		) | ||||
|  | ||||
| 		tracerOptions = append(tracerOptions, jaeger.TracerOptions.DebugThrottler(debugThrottler)) | ||||
| 	} | ||||
|  | ||||
| 	tracer, closer := jaeger.NewTracer( | ||||
| 		c.ServiceName, | ||||
| 		sampler, | ||||
| 		reporter, | ||||
| 		tracerOptions..., | ||||
| 	) | ||||
|  | ||||
| 	return tracer, closer, nil | ||||
| } | ||||
|  | ||||
| // InitGlobalTracer creates a new Jaeger Tracer, and sets it as global OpenTracing Tracer. | ||||
| // It returns a closer func that can be used to flush buffers before shutdown. | ||||
| func (c Configuration) InitGlobalTracer( | ||||
| 	serviceName string, | ||||
| 	options ...Option, | ||||
| ) (io.Closer, error) { | ||||
| 	if c.Disabled { | ||||
| 		return &nullCloser{}, nil | ||||
| 	} | ||||
| 	tracer, closer, err := c.New(serviceName, options...) | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	opentracing.SetGlobalTracer(tracer) | ||||
| 	return closer, nil | ||||
| } | ||||
|  | ||||
| // NewSampler creates a new sampler based on the configuration | ||||
| func (sc *SamplerConfig) NewSampler( | ||||
| 	serviceName string, | ||||
| 	metrics *jaeger.Metrics, | ||||
| ) (jaeger.Sampler, error) { | ||||
| 	samplerType := strings.ToLower(sc.Type) | ||||
| 	if samplerType == jaeger.SamplerTypeConst { | ||||
| 		return jaeger.NewConstSampler(sc.Param != 0), nil | ||||
| 	} | ||||
| 	if samplerType == jaeger.SamplerTypeProbabilistic { | ||||
| 		if sc.Param >= 0 && sc.Param <= 1.0 { | ||||
| 			return jaeger.NewProbabilisticSampler(sc.Param) | ||||
| 		} | ||||
| 		return nil, fmt.Errorf( | ||||
| 			"Invalid Param for probabilistic sampler: %v. Expecting value between 0 and 1", | ||||
| 			sc.Param, | ||||
| 		) | ||||
| 	} | ||||
| 	if samplerType == jaeger.SamplerTypeRateLimiting { | ||||
| 		return jaeger.NewRateLimitingSampler(sc.Param), nil | ||||
| 	} | ||||
| 	if samplerType == jaeger.SamplerTypeRemote || sc.Type == "" { | ||||
| 		sc2 := *sc | ||||
| 		sc2.Type = jaeger.SamplerTypeProbabilistic | ||||
| 		initSampler, err := sc2.NewSampler(serviceName, nil) | ||||
| 		if err != nil { | ||||
| 			return nil, err | ||||
| 		} | ||||
| 		options := []jaeger.SamplerOption{ | ||||
| 			jaeger.SamplerOptions.Metrics(metrics), | ||||
| 			jaeger.SamplerOptions.InitialSampler(initSampler), | ||||
| 			jaeger.SamplerOptions.SamplingServerURL(sc.SamplingServerURL), | ||||
| 		} | ||||
| 		if sc.MaxOperations != 0 { | ||||
| 			options = append(options, jaeger.SamplerOptions.MaxOperations(sc.MaxOperations)) | ||||
| 		} | ||||
| 		if sc.SamplingRefreshInterval != 0 { | ||||
| 			options = append(options, jaeger.SamplerOptions.SamplingRefreshInterval(sc.SamplingRefreshInterval)) | ||||
| 		} | ||||
| 		return jaeger.NewRemotelyControlledSampler(serviceName, options...), nil | ||||
| 	} | ||||
| 	return nil, fmt.Errorf("Unknown sampler type %v", sc.Type) | ||||
| } | ||||
|  | ||||
| // NewReporter instantiates a new reporter that submits spans to tcollector | ||||
| func (rc *ReporterConfig) NewReporter( | ||||
| 	serviceName string, | ||||
| 	metrics *jaeger.Metrics, | ||||
| 	logger jaeger.Logger, | ||||
| ) (jaeger.Reporter, error) { | ||||
| 	sender, err := rc.newTransport() | ||||
| 	if err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	reporter := jaeger.NewRemoteReporter( | ||||
| 		sender, | ||||
| 		jaeger.ReporterOptions.QueueSize(rc.QueueSize), | ||||
| 		jaeger.ReporterOptions.BufferFlushInterval(rc.BufferFlushInterval), | ||||
| 		jaeger.ReporterOptions.Logger(logger), | ||||
| 		jaeger.ReporterOptions.Metrics(metrics)) | ||||
| 	if rc.LogSpans && logger != nil { | ||||
| 		logger.Infof("Initializing logging reporter\n") | ||||
| 		reporter = jaeger.NewCompositeReporter(jaeger.NewLoggingReporter(logger), reporter) | ||||
| 	} | ||||
| 	return reporter, err | ||||
| } | ||||
|  | ||||
| func (rc *ReporterConfig) newTransport() (jaeger.Transport, error) { | ||||
| 	return jaeger.NewUDPTransport(rc.LocalAgentHostPort, 0) | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/config/config_env.go (generated, vendored, new file: 205 lines)
							| @@ -0,0 +1,205 @@ | ||||
| // Copyright (c) 2018 The Jaeger Authors. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"os" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| 	"time" | ||||
|  | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| 	"github.com/pkg/errors" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// environment variable names | ||||
| 	envServiceName            = "JAEGER_SERVICE_NAME" | ||||
| 	envDisabled               = "JAEGER_DISABLED" | ||||
| 	envRPCMetrics             = "JAEGER_RPC_METRICS" | ||||
| 	envTags                   = "JAEGER_TAGS" | ||||
| 	envSamplerType            = "JAEGER_SAMPLER_TYPE" | ||||
| 	envSamplerParam           = "JAEGER_SAMPLER_PARAM" | ||||
| 	envSamplerManagerHostPort = "JAEGER_SAMPLER_MANAGER_HOST_PORT" | ||||
| 	envSamplerMaxOperations   = "JAEGER_SAMPLER_MAX_OPERATIONS" | ||||
| 	envSamplerRefreshInterval = "JAEGER_SAMPLER_REFRESH_INTERVAL" | ||||
| 	envReporterMaxQueueSize   = "JAEGER_REPORTER_MAX_QUEUE_SIZE" | ||||
| 	envReporterFlushInterval  = "JAEGER_REPORTER_FLUSH_INTERVAL" | ||||
| 	envReporterLogSpans       = "JAEGER_REPORTER_LOG_SPANS" | ||||
| 	envAgentHost              = "JAEGER_AGENT_HOST" | ||||
| 	envAgentPort              = "JAEGER_AGENT_PORT" | ||||
| ) | ||||
|  | ||||
| // FromEnv uses environment variables to set the tracer's Configuration | ||||
| func FromEnv() (*Configuration, error) { | ||||
| 	c := &Configuration{} | ||||
|  | ||||
| 	if e := os.Getenv(envServiceName); e != "" { | ||||
| 		c.ServiceName = e | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envRPCMetrics); e != "" { | ||||
| 		if value, err := strconv.ParseBool(e); err == nil { | ||||
| 			c.RPCMetrics = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envRPCMetrics, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envDisabled); e != "" { | ||||
| 		if value, err := strconv.ParseBool(e); err == nil { | ||||
| 			c.Disabled = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envDisabled, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envTags); e != "" { | ||||
| 		c.Tags = parseTags(e) | ||||
| 	} | ||||
|  | ||||
| 	if s, err := samplerConfigFromEnv(); err == nil { | ||||
| 		c.Sampler = s | ||||
| 	} else { | ||||
| 		return nil, errors.Wrap(err, "cannot obtain sampler config from env") | ||||
| 	} | ||||
|  | ||||
| 	if r, err := reporterConfigFromEnv(); err == nil { | ||||
| 		c.Reporter = r | ||||
| 	} else { | ||||
| 		return nil, errors.Wrap(err, "cannot obtain reporter config from env") | ||||
| 	} | ||||
|  | ||||
| 	return c, nil | ||||
| } | ||||
|  | ||||
| // samplerConfigFromEnv creates a new SamplerConfig based on the environment variables | ||||
| func samplerConfigFromEnv() (*SamplerConfig, error) { | ||||
| 	sc := &SamplerConfig{} | ||||
|  | ||||
| 	if e := os.Getenv(envSamplerType); e != "" { | ||||
| 		sc.Type = e | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envSamplerParam); e != "" { | ||||
| 		if value, err := strconv.ParseFloat(e, 64); err == nil { | ||||
| 			sc.Param = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerParam, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envSamplerManagerHostPort); e != "" { | ||||
| 		sc.SamplingServerURL = e | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envSamplerMaxOperations); e != "" { | ||||
| 		if value, err := strconv.ParseInt(e, 10, 0); err == nil { | ||||
| 			sc.MaxOperations = int(value) | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerMaxOperations, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envSamplerRefreshInterval); e != "" { | ||||
| 		if value, err := time.ParseDuration(e); err == nil { | ||||
| 			sc.SamplingRefreshInterval = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envSamplerRefreshInterval, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	return sc, nil | ||||
| } | ||||
|  | ||||
| // reporterConfigFromEnv creates a new ReporterConfig based on the environment variables | ||||
| func reporterConfigFromEnv() (*ReporterConfig, error) { | ||||
| 	rc := &ReporterConfig{} | ||||
|  | ||||
| 	if e := os.Getenv(envReporterMaxQueueSize); e != "" { | ||||
| 		if value, err := strconv.ParseInt(e, 10, 0); err == nil { | ||||
| 			rc.QueueSize = int(value) | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterMaxQueueSize, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envReporterFlushInterval); e != "" { | ||||
| 		if value, err := time.ParseDuration(e); err == nil { | ||||
| 			rc.BufferFlushInterval = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterFlushInterval, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	if e := os.Getenv(envReporterLogSpans); e != "" { | ||||
| 		if value, err := strconv.ParseBool(e); err == nil { | ||||
| 			rc.LogSpans = value | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envReporterLogSpans, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	host := jaeger.DefaultUDPSpanServerHost | ||||
| 	if e := os.Getenv(envAgentHost); e != "" { | ||||
| 		host = e | ||||
| 	} | ||||
|  | ||||
| 	port := jaeger.DefaultUDPSpanServerPort | ||||
| 	if e := os.Getenv(envAgentPort); e != "" { | ||||
| 		if value, err := strconv.ParseInt(e, 10, 0); err == nil { | ||||
| 			port = int(value) | ||||
| 		} else { | ||||
| 			return nil, errors.Wrapf(err, "cannot parse env var %s=%s", envAgentPort, e) | ||||
| 		} | ||||
| 	} | ||||
|  | ||||
| 	// the side effect of this is that we are building the default value, even if none of the env vars | ||||
| 	// were explicitly passed | ||||
| 	rc.LocalAgentHostPort = fmt.Sprintf("%s:%d", host, port) | ||||
|  | ||||
| 	return rc, nil | ||||
| } | ||||
|  | ||||
| // parseTags parses the given string into a collection of Tags. | ||||
| // Spec for this value: | ||||
| // - comma separated list of key=value | ||||
| // - value can be specified using the notation ${envVar:defaultValue}, where `envVar` | ||||
| // is an environment variable and `defaultValue` is the value to use in case the env var is not set | ||||
| func parseTags(sTags string) []opentracing.Tag { | ||||
| 	pairs := strings.Split(sTags, ",") | ||||
| 	tags := make([]opentracing.Tag, 0) | ||||
| 	for _, p := range pairs { | ||||
| 		kv := strings.SplitN(p, "=", 2) | ||||
| 		k, v := strings.TrimSpace(kv[0]), strings.TrimSpace(kv[1]) | ||||
|  | ||||
| 		if strings.HasPrefix(v, "${") && strings.HasSuffix(v, "}") { | ||||
| 			ed := strings.SplitN(v[2:len(v)-1], ":", 2) | ||||
| 			e, d := ed[0], "" | ||||
| 			if len(ed) == 2 { | ||||
| 				d = ed[1] // the ":default" part is optional | ||||
| 			} | ||||
| 			v = os.Getenv(e) | ||||
| 			if v == "" && d != "" { | ||||
| 				v = d | ||||
| 			} | ||||
| 		} | ||||
|  | ||||
| 		tag := opentracing.Tag{Key: k, Value: v} | ||||
| 		tags = append(tags, tag) | ||||
| 	} | ||||
|  | ||||
| 	return tags | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/config/options.go (generated, vendored, new file: 148 lines)
							| @@ -0,0 +1,148 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package config | ||||
|  | ||||
| import ( | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| 	"github.com/uber/jaeger-lib/metrics" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| ) | ||||
|  | ||||
| // Option is a function that sets some option on the client. | ||||
| type Option func(c *Options) | ||||
|  | ||||
| // Options control behavior of the client. | ||||
| type Options struct { | ||||
| 	metrics             metrics.Factory | ||||
| 	logger              jaeger.Logger | ||||
| 	reporter            jaeger.Reporter | ||||
| 	sampler             jaeger.Sampler | ||||
| 	contribObservers    []jaeger.ContribObserver | ||||
| 	observers           []jaeger.Observer | ||||
| 	gen128Bit           bool | ||||
| 	zipkinSharedRPCSpan bool | ||||
| 	maxTagValueLength   int | ||||
| 	tags                []opentracing.Tag | ||||
| 	injectors           map[interface{}]jaeger.Injector | ||||
| 	extractors          map[interface{}]jaeger.Extractor | ||||
| } | ||||
|  | ||||
| // Metrics creates an Option that initializes Metrics in the tracer, | ||||
| // which is used to emit statistics about spans. | ||||
| func Metrics(factory metrics.Factory) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.metrics = factory | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Logger can be provided to log Reporter errors, as well as to log spans | ||||
| // if Reporter.LogSpans is set to true. | ||||
| func Logger(logger jaeger.Logger) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.logger = logger | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Reporter can be provided explicitly to override the configuration. | ||||
| // Useful for testing, e.g. by passing InMemoryReporter. | ||||
| func Reporter(reporter jaeger.Reporter) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.reporter = reporter | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Sampler can be provided explicitly to override the configuration. | ||||
| func Sampler(sampler jaeger.Sampler) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.sampler = sampler | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Observer can be registered with the Tracer to receive notifications about new Spans. | ||||
| func Observer(observer jaeger.Observer) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.observers = append(c.observers, observer) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ContribObserver can be registered with the Tracer to receive notifications | ||||
| // about new spans. | ||||
| func ContribObserver(observer jaeger.ContribObserver) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.contribObservers = append(c.contribObservers, observer) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Gen128Bit specifies whether to generate 128bit trace IDs. | ||||
| func Gen128Bit(gen128Bit bool) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.gen128Bit = gen128Bit | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // ZipkinSharedRPCSpan creates an option that enables sharing span ID between client | ||||
| // and server spans a la zipkin. If false, client and server spans will be assigned | ||||
| // different IDs. | ||||
| func ZipkinSharedRPCSpan(zipkinSharedRPCSpan bool) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.zipkinSharedRPCSpan = zipkinSharedRPCSpan | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // MaxTagValueLength can be provided to override the default max tag value length. | ||||
| func MaxTagValueLength(maxTagValueLength int) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.maxTagValueLength = maxTagValueLength | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Tag creates an option that adds a tracer-level tag. | ||||
| func Tag(key string, value interface{}) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.tags = append(c.tags, opentracing.Tag{Key: key, Value: value}) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Injector registers an Injector with the given format. | ||||
| func Injector(format interface{}, injector jaeger.Injector) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.injectors[format] = injector | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Extractor registers an Extractor with the given format. | ||||
| func Extractor(format interface{}, extractor jaeger.Extractor) Option { | ||||
| 	return func(c *Options) { | ||||
| 		c.extractors[format] = extractor | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func applyOptions(options ...Option) Options { | ||||
| 	opts := Options{ | ||||
| 		injectors:  make(map[interface{}]jaeger.Injector), | ||||
| 		extractors: make(map[interface{}]jaeger.Extractor), | ||||
| 	} | ||||
| 	for _, option := range options { | ||||
| 		option(&opts) | ||||
| 	} | ||||
| 	if opts.metrics == nil { | ||||
| 		opts.metrics = metrics.NullFactory | ||||
| 	} | ||||
| 	if opts.logger == nil { | ||||
| 		opts.logger = jaeger.NullLogger | ||||
| 	} | ||||
| 	return opts | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/constants.go (generated, vendored, new file: 88 lines)
							| @@ -0,0 +1,88 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| const ( | ||||
| 	// JaegerClientVersion is the version of the client library reported as Span tag. | ||||
| 	JaegerClientVersion = "Go-2.15.0-dev" | ||||
|  | ||||
| 	// JaegerClientVersionTagKey is the name of the tag used to report client version. | ||||
| 	JaegerClientVersionTagKey = "jaeger.version" | ||||
|  | ||||
| 	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, | ||||
| 	// if found in the carrier, forces the trace to be sampled as "debug" trace. | ||||
| 	// The value of the header is recorded as the tag on the root span, so that the | ||||
| 	// trace can be found in the UI using this value as a correlation ID. | ||||
| 	JaegerDebugHeader = "jaeger-debug-id" | ||||
|  | ||||
| 	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. | ||||
| 	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where | ||||
| 	// a root span does not exist. | ||||
| 	JaegerBaggageHeader = "jaeger-baggage" | ||||
|  | ||||
| 	// TracerHostnameTagKey used to report host name of the process. | ||||
| 	TracerHostnameTagKey = "hostname" | ||||
|  | ||||
| 	// TracerIPTagKey used to report ip of the process. | ||||
| 	TracerIPTagKey = "ip" | ||||
|  | ||||
| 	// TracerUUIDTagKey used to report UUID of the client process. | ||||
| 	TracerUUIDTagKey = "client-uuid" | ||||
|  | ||||
| 	// SamplerTypeTagKey reports which sampler was used on the root span. | ||||
| 	SamplerTypeTagKey = "sampler.type" | ||||
|  | ||||
| 	// SamplerParamTagKey reports the parameter of the sampler, like sampling probability. | ||||
| 	SamplerParamTagKey = "sampler.param" | ||||
|  | ||||
| 	// TraceContextHeaderName is the http header name used to propagate tracing context. | ||||
| 	// This must be in lower-case to avoid mismatches when decoding incoming headers. | ||||
| 	TraceContextHeaderName = "uber-trace-id" | ||||
|  | ||||
| 	// TracerStateHeaderName is deprecated. | ||||
| 	// Deprecated: use TraceContextHeaderName | ||||
| 	TracerStateHeaderName = TraceContextHeaderName | ||||
|  | ||||
| 	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. | ||||
| 	// This must be in lower-case to avoid mismatches when decoding incoming headers. | ||||
| 	TraceBaggageHeaderPrefix = "uberctx-" | ||||
|  | ||||
| 	// SamplerTypeConst is the type of sampler that always makes the same decision. | ||||
| 	SamplerTypeConst = "const" | ||||
|  | ||||
| 	// SamplerTypeRemote is the type of sampler that polls Jaeger agent for sampling strategy. | ||||
| 	SamplerTypeRemote = "remote" | ||||
|  | ||||
| 	// SamplerTypeProbabilistic is the type of sampler that samples traces | ||||
| 	// with a certain fixed probability. | ||||
| 	SamplerTypeProbabilistic = "probabilistic" | ||||
|  | ||||
| 	// SamplerTypeRateLimiting is the type of sampler that samples | ||||
| 	// only up to a fixed number of traces per second. | ||||
| 	SamplerTypeRateLimiting = "ratelimiting" | ||||
|  | ||||
| 	// SamplerTypeLowerBound is the type of sampler that samples | ||||
| 	// at least a fixed number of traces per second. | ||||
| 	SamplerTypeLowerBound = "lowerbound" | ||||
|  | ||||
| 	// DefaultUDPSpanServerHost is the default host to send the spans to, via UDP | ||||
| 	DefaultUDPSpanServerHost = "localhost" | ||||
|  | ||||
| 	// DefaultUDPSpanServerPort is the default port to send the spans to, via UDP | ||||
| 	DefaultUDPSpanServerPort = 6831 | ||||
|  | ||||
| 	// DefaultMaxTagValueLength is the default max length of byte array or string allowed in the tag value. | ||||
| 	DefaultMaxTagValueLength = 256 | ||||
| ) | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/context.go (generated, vendored, new file: 258 lines)
							| @@ -0,0 +1,258 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"errors" | ||||
| 	"fmt" | ||||
| 	"strconv" | ||||
| 	"strings" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	flagSampled = byte(1) | ||||
| 	flagDebug   = byte(2) | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	errEmptyTracerStateString     = errors.New("Cannot convert empty string to tracer state") | ||||
| 	errMalformedTracerStateString = errors.New("String does not match tracer state format") | ||||
|  | ||||
| 	emptyContext = SpanContext{} | ||||
| ) | ||||
|  | ||||
| // TraceID represents unique 128bit identifier of a trace | ||||
| type TraceID struct { | ||||
| 	High, Low uint64 | ||||
| } | ||||
|  | ||||
| // SpanID represents unique 64bit identifier of a span | ||||
| type SpanID uint64 | ||||
|  | ||||
| // SpanContext represents propagated span identity and state | ||||
| type SpanContext struct { | ||||
| 	// traceID represents globally unique ID of the trace. | ||||
| 	// Usually generated as a random number. | ||||
| 	traceID TraceID | ||||
|  | ||||
| 	// spanID represents span ID that must be unique within its trace, | ||||
| 	// but does not have to be globally unique. | ||||
| 	spanID SpanID | ||||
|  | ||||
| 	// parentID refers to the ID of the parent span. | ||||
| 	// Should be 0 if the current span is a root span. | ||||
| 	parentID SpanID | ||||
|  | ||||
| 	// flags is a bitmap containing such bits as 'sampled' and 'debug'. | ||||
| 	flags byte | ||||
|  | ||||
| 	// Distributed Context baggage. This is a snapshot in time. | ||||
| 	baggage map[string]string | ||||
|  | ||||
| 	// debugID can be set to some correlation ID when the context is being | ||||
| 	// extracted from a TextMap carrier. | ||||
| 	// | ||||
| 	// See JaegerDebugHeader in constants.go | ||||
| 	debugID string | ||||
| } | ||||
|  | ||||
| // ForeachBaggageItem implements ForeachBaggageItem() of opentracing.SpanContext | ||||
| func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) { | ||||
| 	for k, v := range c.baggage { | ||||
| 		if !handler(k, v) { | ||||
| 			break | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // IsSampled returns whether this trace was chosen for permanent storage | ||||
| // by the sampling mechanism of the tracer. | ||||
| func (c SpanContext) IsSampled() bool { | ||||
| 	return (c.flags & flagSampled) == flagSampled | ||||
| } | ||||
|  | ||||
| // IsDebug indicates whether sampling was explicitly requested by the service. | ||||
| func (c SpanContext) IsDebug() bool { | ||||
| 	return (c.flags & flagDebug) == flagDebug | ||||
| } | ||||
|  | ||||
| // IsValid indicates whether this context actually represents a valid trace. | ||||
| func (c SpanContext) IsValid() bool { | ||||
| 	return c.traceID.IsValid() && c.spanID != 0 | ||||
| } | ||||
|  | ||||
| func (c SpanContext) String() string { | ||||
| 	if c.traceID.High == 0 { | ||||
| 		return fmt.Sprintf("%x:%x:%x:%x", c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) | ||||
| 	} | ||||
| 	return fmt.Sprintf("%x%016x:%x:%x:%x", c.traceID.High, c.traceID.Low, uint64(c.spanID), uint64(c.parentID), c.flags) | ||||
| } | ||||
|  | ||||
| // ContextFromString reconstructs the Context encoded in a string | ||||
| func ContextFromString(value string) (SpanContext, error) { | ||||
| 	var context SpanContext | ||||
| 	if value == "" { | ||||
| 		return emptyContext, errEmptyTracerStateString | ||||
| 	} | ||||
| 	parts := strings.Split(value, ":") | ||||
| 	if len(parts) != 4 { | ||||
| 		return emptyContext, errMalformedTracerStateString | ||||
| 	} | ||||
| 	var err error | ||||
| 	if context.traceID, err = TraceIDFromString(parts[0]); err != nil { | ||||
| 		return emptyContext, err | ||||
| 	} | ||||
| 	if context.spanID, err = SpanIDFromString(parts[1]); err != nil { | ||||
| 		return emptyContext, err | ||||
| 	} | ||||
| 	if context.parentID, err = SpanIDFromString(parts[2]); err != nil { | ||||
| 		return emptyContext, err | ||||
| 	} | ||||
| 	flags, err := strconv.ParseUint(parts[3], 10, 8) | ||||
| 	if err != nil { | ||||
| 		return emptyContext, err | ||||
| 	} | ||||
| 	context.flags = byte(flags) | ||||
| 	return context, nil | ||||
| } | ||||
|  | ||||
| // TraceID returns the trace ID of this span context | ||||
| func (c SpanContext) TraceID() TraceID { | ||||
| 	return c.traceID | ||||
| } | ||||
|  | ||||
| // SpanID returns the span ID of this span context | ||||
| func (c SpanContext) SpanID() SpanID { | ||||
| 	return c.spanID | ||||
| } | ||||
|  | ||||
| // ParentID returns the parent span ID of this span context | ||||
| func (c SpanContext) ParentID() SpanID { | ||||
| 	return c.parentID | ||||
| } | ||||
|  | ||||
| // NewSpanContext creates a new instance of SpanContext | ||||
| func NewSpanContext(traceID TraceID, spanID, parentID SpanID, sampled bool, baggage map[string]string) SpanContext { | ||||
| 	flags := byte(0) | ||||
| 	if sampled { | ||||
| 		flags = flagSampled | ||||
| 	} | ||||
| 	return SpanContext{ | ||||
| 		traceID:  traceID, | ||||
| 		spanID:   spanID, | ||||
| 		parentID: parentID, | ||||
| 		flags:    flags, | ||||
| 		baggage:  baggage} | ||||
| } | ||||
|  | ||||
| // CopyFrom copies data from ctx into this context, including span identity and baggage. | ||||
| // TODO This is only used by interop.go. Remove once TChannel Go supports OpenTracing. | ||||
| func (c *SpanContext) CopyFrom(ctx *SpanContext) { | ||||
| 	c.traceID = ctx.traceID | ||||
| 	c.spanID = ctx.spanID | ||||
| 	c.parentID = ctx.parentID | ||||
| 	c.flags = ctx.flags | ||||
| 	if l := len(ctx.baggage); l > 0 { | ||||
| 		c.baggage = make(map[string]string, l) | ||||
| 		for k, v := range ctx.baggage { | ||||
| 			c.baggage[k] = v | ||||
| 		} | ||||
| 	} else { | ||||
| 		c.baggage = nil | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // WithBaggageItem creates a new context with an extra baggage item. | ||||
| func (c SpanContext) WithBaggageItem(key, value string) SpanContext { | ||||
| 	var newBaggage map[string]string | ||||
| 	if c.baggage == nil { | ||||
| 		newBaggage = map[string]string{key: value} | ||||
| 	} else { | ||||
| 		newBaggage = make(map[string]string, len(c.baggage)+1) | ||||
| 		for k, v := range c.baggage { | ||||
| 			newBaggage[k] = v | ||||
| 		} | ||||
| 		newBaggage[key] = value | ||||
| 	} | ||||
| 	// Use positional parameters so the compiler will help catch new fields. | ||||
| 	return SpanContext{c.traceID, c.spanID, c.parentID, c.flags, newBaggage, ""} | ||||
| } | ||||
|  | ||||
| // isDebugIDContainerOnly returns true when the instance of the context is only | ||||
| // used to return the debug/correlation ID from extract() method. This happens | ||||
| // in the situation when "jaeger-debug-id" header is passed in the carrier to | ||||
| // the extract() method, but the request otherwise has no span context in it. | ||||
| // Previously this would've returned opentracing.ErrSpanContextNotFound from the | ||||
| // extract method, but now it returns a dummy context with only debugID filled in. | ||||
| // | ||||
| // See JaegerDebugHeader in constants.go | ||||
| // See textMapPropagator#Extract | ||||
| func (c *SpanContext) isDebugIDContainerOnly() bool { | ||||
| 	return !c.traceID.IsValid() && c.debugID != "" | ||||
| } | ||||
|  | ||||
| // ------- TraceID ------- | ||||
|  | ||||
| func (t TraceID) String() string { | ||||
| 	if t.High == 0 { | ||||
| 		return fmt.Sprintf("%x", t.Low) | ||||
| 	} | ||||
| 	return fmt.Sprintf("%x%016x", t.High, t.Low) | ||||
| } | ||||
|  | ||||
| // TraceIDFromString creates a TraceID from a hexadecimal string | ||||
| func TraceIDFromString(s string) (TraceID, error) { | ||||
| 	var hi, lo uint64 | ||||
| 	var err error | ||||
| 	if len(s) > 32 { | ||||
| 		return TraceID{}, fmt.Errorf("TraceID cannot be longer than 32 hex characters: %s", s) | ||||
| 	} else if len(s) > 16 { | ||||
| 		hiLen := len(s) - 16 | ||||
| 		if hi, err = strconv.ParseUint(s[0:hiLen], 16, 64); err != nil { | ||||
| 			return TraceID{}, err | ||||
| 		} | ||||
| 		if lo, err = strconv.ParseUint(s[hiLen:], 16, 64); err != nil { | ||||
| 			return TraceID{}, err | ||||
| 		} | ||||
| 	} else { | ||||
| 		if lo, err = strconv.ParseUint(s, 16, 64); err != nil { | ||||
| 			return TraceID{}, err | ||||
| 		} | ||||
| 	} | ||||
| 	return TraceID{High: hi, Low: lo}, nil | ||||
| } | ||||
|  | ||||
| // IsValid checks if the trace ID is valid, i.e. not zero. | ||||
| func (t TraceID) IsValid() bool { | ||||
| 	return t.High != 0 || t.Low != 0 | ||||
| } | ||||
|  | ||||
| // ------- SpanID ------- | ||||
|  | ||||
| func (s SpanID) String() string { | ||||
| 	return fmt.Sprintf("%x", uint64(s)) | ||||
| } | ||||
|  | ||||
| // SpanIDFromString creates a SpanID from a hexadecimal string | ||||
| func SpanIDFromString(s string) (SpanID, error) { | ||||
| 	if len(s) > 16 { | ||||
| 		return SpanID(0), fmt.Errorf("SpanID cannot be longer than 16 hex characters: %s", s) | ||||
| 	} | ||||
| 	id, err := strconv.ParseUint(s, 16, 64) | ||||
| 	if err != nil { | ||||
| 		return SpanID(0), err | ||||
| 	} | ||||
| 	return SpanID(id), nil | ||||
| } | ||||
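The String and ContextFromString functions above define the textual encoding used when a span context is carried as a single header value: four colon-separated hex fields, traceID:spanID:parentID:flags. A minimal round-trip sketch (the literal values are made up for illustration):

	package main

	import (
		"fmt"

		jaeger "github.com/uber/jaeger-client-go"
	)

	func main() {
		// "1:2:0:1" decodes to traceID=1, spanID=2, no parent, sampled flag set.
		ctx, err := jaeger.ContextFromString("1:2:0:1")
		if err != nil {
			panic(err)
		}
		fmt.Println(ctx.IsSampled()) // true
		fmt.Println(ctx.String())    // re-encodes as "1:2:0:1"
	}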
							
								
								
									
vendor/github.com/uber/jaeger-client-go/contrib_observer.go (new file, 56 lines, generated, vendored)
| @@ -0,0 +1,56 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	opentracing "github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| // ContribObserver can be registered with the Tracer to receive notifications | ||||
| // about new Spans. Modelled after github.com/opentracing-contrib/go-observer. | ||||
| type ContribObserver interface { | ||||
| 	// Create and return a span observer. Called when a span starts. | ||||
| 	// If the Observer is not interested in the given span, it must return (nil, false). | ||||
| 	// E.g.: | ||||
| 	//     func StartSpan(opName string, opts ...opentracing.StartSpanOption) { | ||||
| 	//         var sp opentracing.Span | ||||
| 	//         sso := opentracing.StartSpanOptions{} | ||||
| 	//         if spanObserver, ok := Observer.OnStartSpan(sp, opName, sso); ok { | ||||
| 	//             // we have a valid SpanObserver | ||||
| 	//         } | ||||
| 	//         ... | ||||
| 	//     } | ||||
| 	OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) | ||||
| } | ||||
|  | ||||
| // ContribSpanObserver is created by the Observer and receives notifications | ||||
| // about other Span events. This interface is meant to match | ||||
| // github.com/opentracing-contrib/go-observer, via duck typing, without | ||||
| // directly importing the go-observer package. | ||||
| type ContribSpanObserver interface { | ||||
| 	OnSetOperationName(operationName string) | ||||
| 	OnSetTag(key string, value interface{}) | ||||
| 	OnFinish(options opentracing.FinishOptions) | ||||
| } | ||||
|  | ||||
| // wrapper observer for the old observers (see observer.go) | ||||
| type oldObserver struct { | ||||
| 	obs Observer | ||||
| } | ||||
|  | ||||
| func (o *oldObserver) OnStartSpan(sp opentracing.Span, operationName string, options opentracing.StartSpanOptions) (ContribSpanObserver, bool) { | ||||
| 	spanObserver := o.obs.OnStartSpan(operationName, options) | ||||
| 	return spanObserver, spanObserver != nil | ||||
| } | ||||
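Implementing these two interfaces takes only a handful of methods. The sketch below is a hypothetical observer that logs span starts; registering it with the tracer is normally done through a tracer option at construction time, which is outside this diff:

	package observerdemo

	import (
		"log"

		opentracing "github.com/opentracing/opentracing-go"
		jaeger "github.com/uber/jaeger-client-go"
	)

	// loggingObserver satisfies jaeger.ContribObserver.
	type loggingObserver struct{}

	func (loggingObserver) OnStartSpan(sp opentracing.Span, operationName string, _ opentracing.StartSpanOptions) (jaeger.ContribSpanObserver, bool) {
		log.Printf("span started: %s", operationName)
		return spanObserver{}, true
	}

	// spanObserver satisfies jaeger.ContribSpanObserver and ignores all events.
	type spanObserver struct{}

	func (spanObserver) OnSetOperationName(operationName string)    {}
	func (spanObserver) OnSetTag(key string, value interface{})     {}
	func (spanObserver) OnFinish(options opentracing.FinishOptions) {}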
							
								
								
									
vendor/github.com/uber/jaeger-client-go/doc.go (new file, 24 lines, generated, vendored)
| @@ -0,0 +1,24 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| /* | ||||
| Package jaeger implements an OpenTracing (http://opentracing.io) Tracer. | ||||
| It currently uses a Zipkin-compatible data model and can be directly | ||||
| integrated with a Zipkin backend (http://zipkin.io). | ||||
|  | ||||
| For integration instructions please refer to the README: | ||||
|  | ||||
| https://github.com/uber/jaeger-client-go/blob/master/README.md | ||||
| */ | ||||
| package jaeger | ||||
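A hedged sketch of the typical bootstrap a caller performs with this client: build a tracer, make it the global OpenTracing tracer, and close it on shutdown. The service name is made up, and a real setup would use a reporter that actually ships spans to an agent rather than the null reporter:

	package main

	import (
		"io"

		opentracing "github.com/opentracing/opentracing-go"
		jaeger "github.com/uber/jaeger-client-go"
	)

	func newTracer() (opentracing.Tracer, io.Closer) {
		// Sample everything and discard reported spans; good enough for a local sketch.
		tracer, closer := jaeger.NewTracer(
			"example-service",
			jaeger.NewConstSampler(true),
			jaeger.NewNullReporter(),
		)
		opentracing.SetGlobalTracer(tracer)
		return tracer, closer
	}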
							
								
								
									
vendor/github.com/uber/jaeger-client-go/glide.lock (new file, 89 lines, generated, vendored)
| @@ -0,0 +1,89 @@ | ||||
| hash: 3accf84f97bff4a91162736104c0e9b9790820712bd86db6fec5e665f7196a82 | ||||
| updated: 2018-04-30T11:46:43.804556-04:00 | ||||
| imports: | ||||
| - name: github.com/beorn7/perks | ||||
|   version: 3a771d992973f24aa725d07868b467d1ddfceafb | ||||
|   subpackages: | ||||
|   - quantile | ||||
| - name: github.com/codahale/hdrhistogram | ||||
|   version: 3a0bb77429bd3a61596f5e8a3172445844342120 | ||||
| - name: github.com/crossdock/crossdock-go | ||||
|   version: 049aabb0122b03bc9bd30cab8f3f91fb60166361 | ||||
|   subpackages: | ||||
|   - assert | ||||
|   - require | ||||
| - name: github.com/davecgh/go-spew | ||||
|   version: 8991bc29aa16c548c550c7ff78260e27b9ab7c73 | ||||
|   subpackages: | ||||
|   - spew | ||||
| - name: github.com/golang/protobuf | ||||
|   version: bbd03ef6da3a115852eaf24c8a1c46aeb39aa175 | ||||
|   subpackages: | ||||
|   - proto | ||||
| - name: github.com/matttproud/golang_protobuf_extensions | ||||
|   version: c12348ce28de40eed0136aa2b644d0ee0650e56c | ||||
|   subpackages: | ||||
|   - pbutil | ||||
| - name: github.com/opentracing/opentracing-go | ||||
|   version: 1949ddbfd147afd4d964a9f00b24eb291e0e7c38 | ||||
|   subpackages: | ||||
|   - ext | ||||
|   - log | ||||
| - name: github.com/pkg/errors | ||||
|   version: 645ef00459ed84a119197bfb8d8205042c6df63d | ||||
| - name: github.com/pmezard/go-difflib | ||||
|   version: 792786c7400a136282c1664665ae0a8db921c6c2 | ||||
|   subpackages: | ||||
|   - difflib | ||||
| - name: github.com/prometheus/client_golang | ||||
|   version: c5b7fccd204277076155f10851dad72b76a49317 | ||||
|   subpackages: | ||||
|   - prometheus | ||||
| - name: github.com/prometheus/client_model | ||||
|   version: 99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c | ||||
|   subpackages: | ||||
|   - go | ||||
| - name: github.com/prometheus/common | ||||
|   version: 38c53a9f4bfcd932d1b00bfc65e256a7fba6b37a | ||||
|   subpackages: | ||||
|   - expfmt | ||||
|   - internal/bitbucket.org/ww/goautoneg | ||||
|   - model | ||||
| - name: github.com/prometheus/procfs | ||||
|   version: 780932d4fbbe0e69b84c34c20f5c8d0981e109ea | ||||
|   subpackages: | ||||
|   - internal/util | ||||
|   - nfs | ||||
|   - xfs | ||||
| - name: github.com/stretchr/testify | ||||
|   version: 12b6f73e6084dad08a7c6e575284b177ecafbc71 | ||||
|   subpackages: | ||||
|   - assert | ||||
|   - require | ||||
|   - suite | ||||
| - name: github.com/uber/jaeger-lib | ||||
|   version: 4267858c0679cd4e47cefed8d7f70fd386cfb567 | ||||
|   subpackages: | ||||
|   - metrics | ||||
|   - metrics/prometheus | ||||
|   - metrics/testutils | ||||
| - name: go.uber.org/atomic | ||||
|   version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 | ||||
| - name: go.uber.org/multierr | ||||
|   version: 3c4937480c32f4c13a875a1829af76c98ca3d40a | ||||
| - name: go.uber.org/zap | ||||
|   version: eeedf312bc6c57391d84767a4cd413f02a917974 | ||||
|   subpackages: | ||||
|   - buffer | ||||
|   - internal/bufferpool | ||||
|   - internal/color | ||||
|   - internal/exit | ||||
|   - zapcore | ||||
| - name: golang.org/x/net | ||||
|   version: 6078986fec03a1dcc236c34816c71b0e05018fda | ||||
|   subpackages: | ||||
|   - context | ||||
|   - context/ctxhttp | ||||
| testImports: | ||||
| - name: github.com/uber-go/atomic | ||||
|   version: 8474b86a5a6f79c443ce4b2992817ff32cf208b8 | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/glide.yaml (new file, 22 lines, generated, vendored)
| @@ -0,0 +1,22 @@ | ||||
| package: github.com/uber/jaeger-client-go | ||||
| import: | ||||
| - package: github.com/opentracing/opentracing-go | ||||
|   version: ^1 | ||||
|   subpackages: | ||||
|   - ext | ||||
|   - log | ||||
| - package: github.com/crossdock/crossdock-go | ||||
| - package: github.com/uber/jaeger-lib | ||||
|   version: ^1.2.1 | ||||
|   subpackages: | ||||
|   - metrics | ||||
| - package: github.com/pkg/errors | ||||
|   version: ~0.8.0 | ||||
| testImport: | ||||
| - package: github.com/stretchr/testify | ||||
|   subpackages: | ||||
|   - assert | ||||
|   - require | ||||
|   - suite | ||||
| - package: github.com/prometheus/client_golang | ||||
|   version: v0.8.0 | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/header.go (new file, 64 lines, generated, vendored)
| @@ -0,0 +1,64 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| // HeadersConfig contains the values for the header keys that Jaeger will use. | ||||
| // These values may be either custom or default depending on whether custom | ||||
| // values were provided via a configuration. | ||||
| type HeadersConfig struct { | ||||
| 	// JaegerDebugHeader is the name of HTTP header or a TextMap carrier key which, | ||||
| 	// if found in the carrier, forces the trace to be sampled as "debug" trace. | ||||
| 	// The value of the header is recorded as the tag on the root span, so that the | ||||
| 	// trace can be found in the UI using this value as a correlation ID. | ||||
| 	JaegerDebugHeader string `yaml:"jaegerDebugHeader"` | ||||
|  | ||||
| 	// JaegerBaggageHeader is the name of the HTTP header that is used to submit baggage. | ||||
| 	// It differs from TraceBaggageHeaderPrefix in that it can be used only in cases where | ||||
| 	// a root span does not exist. | ||||
| 	JaegerBaggageHeader string `yaml:"jaegerBaggageHeader"` | ||||
|  | ||||
| 	// TraceContextHeaderName is the http header name used to propagate tracing context. | ||||
| 	// This must be in lower-case to avoid mismatches when decoding incoming headers. | ||||
| 	TraceContextHeaderName string `yaml:"TraceContextHeaderName"` | ||||
|  | ||||
| 	// TraceBaggageHeaderPrefix is the prefix for http headers used to propagate baggage. | ||||
| 	// This must be in lower-case to avoid mismatches when decoding incoming headers. | ||||
| 	TraceBaggageHeaderPrefix string `yaml:"traceBaggageHeaderPrefix"` | ||||
| } | ||||
|  | ||||
| func (c *HeadersConfig) applyDefaults() *HeadersConfig { | ||||
| 	if c.JaegerBaggageHeader == "" { | ||||
| 		c.JaegerBaggageHeader = JaegerBaggageHeader | ||||
| 	} | ||||
| 	if c.JaegerDebugHeader == "" { | ||||
| 		c.JaegerDebugHeader = JaegerDebugHeader | ||||
| 	} | ||||
| 	if c.TraceBaggageHeaderPrefix == "" { | ||||
| 		c.TraceBaggageHeaderPrefix = TraceBaggageHeaderPrefix | ||||
| 	} | ||||
| 	if c.TraceContextHeaderName == "" { | ||||
| 		c.TraceContextHeaderName = TraceContextHeaderName | ||||
| 	} | ||||
| 	return c | ||||
| } | ||||
|  | ||||
| func getDefaultHeadersConfig() *HeadersConfig { | ||||
| 	return &HeadersConfig{ | ||||
| 		JaegerDebugHeader:        JaegerDebugHeader, | ||||
| 		JaegerBaggageHeader:      JaegerBaggageHeader, | ||||
| 		TraceContextHeaderName:   TraceContextHeaderName, | ||||
| 		TraceBaggageHeaderPrefix: TraceBaggageHeaderPrefix, | ||||
| 	} | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/options.go (new file, 101 lines, generated, vendored)
| @@ -0,0 +1,101 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package remote | ||||
|  | ||||
| import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	defaultMaxValueLength  = 2048 | ||||
| 	defaultRefreshInterval = time.Minute | ||||
| 	defaultHostPort        = "localhost:5778" | ||||
| ) | ||||
|  | ||||
| // Option is a function that sets some option on the RestrictionManager | ||||
| type Option func(options *options) | ||||
|  | ||||
| // Options is a factory for all available options | ||||
| var Options options | ||||
|  | ||||
| type options struct { | ||||
| 	denyBaggageOnInitializationFailure bool | ||||
| 	metrics                            *jaeger.Metrics | ||||
| 	logger                             jaeger.Logger | ||||
| 	hostPort                           string | ||||
| 	refreshInterval                    time.Duration | ||||
| } | ||||
|  | ||||
| // DenyBaggageOnInitializationFailure creates an Option that determines the startup failure mode of RestrictionManager. | ||||
| // If DenyBaggageOnInitializationFailure is true, RestrictionManager will not allow any baggage to be written until baggage | ||||
| // restrictions have been retrieved from agent. | ||||
| // If DenyBaggageOnInitializationFailure is false, RestrictionManager will allow any baggage to be written until baggage | ||||
| // restrictions have been retrieved from agent. | ||||
| func (options) DenyBaggageOnInitializationFailure(b bool) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.denyBaggageOnInitializationFailure = b | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Metrics creates an Option that initializes Metrics on the RestrictionManager, which is used to emit statistics. | ||||
| func (options) Metrics(m *jaeger.Metrics) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.metrics = m | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Logger creates an Option that sets the logger used by the RestrictionManager. | ||||
| func (options) Logger(logger jaeger.Logger) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.logger = logger | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // HostPort creates an Option that sets the hostPort of the local agent that contains the baggage restrictions. | ||||
| func (options) HostPort(hostPort string) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.hostPort = hostPort | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RefreshInterval creates an Option that sets how often the RestrictionManager will poll local agent for | ||||
| // the baggage restrictions. | ||||
| func (options) RefreshInterval(refreshInterval time.Duration) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.refreshInterval = refreshInterval | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func applyOptions(o ...Option) options { | ||||
| 	opts := options{} | ||||
| 	for _, option := range o { | ||||
| 		option(&opts) | ||||
| 	} | ||||
| 	if opts.metrics == nil { | ||||
| 		opts.metrics = jaeger.NewNullMetrics() | ||||
| 	} | ||||
| 	if opts.logger == nil { | ||||
| 		opts.logger = jaeger.NullLogger | ||||
| 	} | ||||
| 	if opts.hostPort == "" { | ||||
| 		opts.hostPort = defaultHostPort | ||||
| 	} | ||||
| 	if opts.refreshInterval == 0 { | ||||
| 		opts.refreshInterval = defaultRefreshInterval | ||||
| 	} | ||||
| 	return opts | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/baggage/remote/restriction_manager.go (new file, 157 lines, generated, vendored)
| @@ -0,0 +1,157 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package remote | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/url" | ||||
| 	"sync" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go/internal/baggage" | ||||
| 	thrift "github.com/uber/jaeger-client-go/thrift-gen/baggage" | ||||
| 	"github.com/uber/jaeger-client-go/utils" | ||||
| ) | ||||
|  | ||||
| type httpBaggageRestrictionManagerProxy struct { | ||||
| 	url string | ||||
| } | ||||
|  | ||||
| func newHTTPBaggageRestrictionManagerProxy(hostPort, serviceName string) *httpBaggageRestrictionManagerProxy { | ||||
| 	v := url.Values{} | ||||
| 	v.Set("service", serviceName) | ||||
| 	return &httpBaggageRestrictionManagerProxy{ | ||||
| 		url: fmt.Sprintf("http://%s/baggageRestrictions?%s", hostPort, v.Encode()), | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (s *httpBaggageRestrictionManagerProxy) GetBaggageRestrictions(serviceName string) ([]*thrift.BaggageRestriction, error) { | ||||
| 	var out []*thrift.BaggageRestriction | ||||
| 	if err := utils.GetJSON(s.url, &out); err != nil { | ||||
| 		return nil, err | ||||
| 	} | ||||
| 	return out, nil | ||||
| } | ||||
|  | ||||
| // RestrictionManager manages baggage restrictions by periodically retrieving them from the agent | ||||
| type RestrictionManager struct { | ||||
| 	options | ||||
|  | ||||
| 	mux                sync.RWMutex | ||||
| 	serviceName        string | ||||
| 	restrictions       map[string]*baggage.Restriction | ||||
| 	thriftProxy        thrift.BaggageRestrictionManager | ||||
| 	pollStopped        sync.WaitGroup | ||||
| 	stopPoll           chan struct{} | ||||
| 	invalidRestriction *baggage.Restriction | ||||
| 	validRestriction   *baggage.Restriction | ||||
|  | ||||
| 	// Determines if the manager has successfully retrieved baggage restrictions from agent | ||||
| 	initialized bool | ||||
| } | ||||
|  | ||||
| // NewRestrictionManager returns a BaggageRestrictionManager that polls the agent for the latest | ||||
| // baggage restrictions. | ||||
| func NewRestrictionManager(serviceName string, options ...Option) *RestrictionManager { | ||||
| 	// TODO there is a developing use case where a single tracer can generate traces on behalf of many services. | ||||
| 	// restrictionsMap will need to exist per service | ||||
| 	opts := applyOptions(options...) | ||||
| 	m := &RestrictionManager{ | ||||
| 		serviceName:        serviceName, | ||||
| 		options:            opts, | ||||
| 		restrictions:       make(map[string]*baggage.Restriction), | ||||
| 		thriftProxy:        newHTTPBaggageRestrictionManagerProxy(opts.hostPort, serviceName), | ||||
| 		stopPoll:           make(chan struct{}), | ||||
| 		invalidRestriction: baggage.NewRestriction(false, 0), | ||||
| 		validRestriction:   baggage.NewRestriction(true, defaultMaxValueLength), | ||||
| 	} | ||||
| 	m.pollStopped.Add(1) | ||||
| 	go m.pollManager() | ||||
| 	return m | ||||
| } | ||||
|  | ||||
| // isReady returns true if the manager has retrieved baggage restrictions from the remote source. | ||||
| func (m *RestrictionManager) isReady() bool { | ||||
| 	m.mux.RLock() | ||||
| 	defer m.mux.RUnlock() | ||||
| 	return m.initialized | ||||
| } | ||||
|  | ||||
| // GetRestriction implements RestrictionManager#GetRestriction. | ||||
| func (m *RestrictionManager) GetRestriction(service, key string) *baggage.Restriction { | ||||
| 	m.mux.RLock() | ||||
| 	defer m.mux.RUnlock() | ||||
| 	if !m.initialized { | ||||
| 		if m.denyBaggageOnInitializationFailure { | ||||
| 			return m.invalidRestriction | ||||
| 		} | ||||
| 		return m.validRestriction | ||||
| 	} | ||||
| 	if restriction, ok := m.restrictions[key]; ok { | ||||
| 		return restriction | ||||
| 	} | ||||
| 	return m.invalidRestriction | ||||
| } | ||||
|  | ||||
| // Close stops remote polling and closes the RestrictionManager. | ||||
| func (m *RestrictionManager) Close() error { | ||||
| 	close(m.stopPoll) | ||||
| 	m.pollStopped.Wait() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *RestrictionManager) pollManager() { | ||||
| 	defer m.pollStopped.Done() | ||||
| 	// attempt to initialize baggage restrictions | ||||
| 	if err := m.updateRestrictions(); err != nil { | ||||
| 		m.logger.Error(fmt.Sprintf("Failed to initialize baggage restrictions: %s", err.Error())) | ||||
| 	} | ||||
| 	ticker := time.NewTicker(m.refreshInterval) | ||||
| 	defer ticker.Stop() | ||||
|  | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-ticker.C: | ||||
| 			if err := m.updateRestrictions(); err != nil { | ||||
| 				m.logger.Error(fmt.Sprintf("Failed to update baggage restrictions: %s", err.Error())) | ||||
| 			} | ||||
| 		case <-m.stopPoll: | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (m *RestrictionManager) updateRestrictions() error { | ||||
| 	restrictions, err := m.thriftProxy.GetBaggageRestrictions(m.serviceName) | ||||
| 	if err != nil { | ||||
| 		m.metrics.BaggageRestrictionsUpdateFailure.Inc(1) | ||||
| 		return err | ||||
| 	} | ||||
| 	newRestrictions := m.parseRestrictions(restrictions) | ||||
| 	m.metrics.BaggageRestrictionsUpdateSuccess.Inc(1) | ||||
| 	m.mux.Lock() | ||||
| 	defer m.mux.Unlock() | ||||
| 	m.initialized = true | ||||
| 	m.restrictions = newRestrictions | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (m *RestrictionManager) parseRestrictions(restrictions []*thrift.BaggageRestriction) map[string]*baggage.Restriction { | ||||
| 	setters := make(map[string]*baggage.Restriction, len(restrictions)) | ||||
| 	for _, restriction := range restrictions { | ||||
| 		setters[restriction.BaggageKey] = baggage.NewRestriction(true, int(restriction.MaxValueLength)) | ||||
| 	} | ||||
| 	return setters | ||||
| } | ||||
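Because this package sits under internal/, it is only importable from inside jaeger-client-go itself; the tracer wires it up when remote baggage restrictions are enabled. A sketch of that wiring, assuming an agent at the default localhost:5778 (the service name and baggage key are made up):

	mgr := remote.NewRestrictionManager(
		"example-service",
		remote.Options.RefreshInterval(30*time.Second),
		// Deny baggage writes until the first restriction set has been fetched.
		remote.Options.DenyBaggageOnInitializationFailure(true),
	)
	defer mgr.Close()

	r := mgr.GetRestriction("example-service", "session-id")
	if r.KeyAllowed() {
		// baggage may be written, truncated to r.MaxValueLength()
	}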
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/baggage/restriction_manager.go (new file, 71 lines, generated, vendored)
| @@ -0,0 +1,71 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package baggage | ||||
|  | ||||
| const ( | ||||
| 	defaultMaxValueLength = 2048 | ||||
| ) | ||||
|  | ||||
| // Restriction determines whether a baggage key is allowed and contains any restrictions on the baggage value. | ||||
| type Restriction struct { | ||||
| 	keyAllowed     bool | ||||
| 	maxValueLength int | ||||
| } | ||||
|  | ||||
| // NewRestriction returns a new Restriction. | ||||
| func NewRestriction(keyAllowed bool, maxValueLength int) *Restriction { | ||||
| 	return &Restriction{ | ||||
| 		keyAllowed:     keyAllowed, | ||||
| 		maxValueLength: maxValueLength, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // KeyAllowed returns whether the baggage key for this restriction is allowed. | ||||
| func (r *Restriction) KeyAllowed() bool { | ||||
| 	return r.keyAllowed | ||||
| } | ||||
|  | ||||
| // MaxValueLength returns the max length for the baggage value. | ||||
| func (r *Restriction) MaxValueLength() int { | ||||
| 	return r.maxValueLength | ||||
| } | ||||
|  | ||||
| // RestrictionManager keeps track of valid baggage keys and their restrictions. The manager | ||||
| // will return a Restriction for a specific baggage key which will determine whether the baggage | ||||
| // key is allowed for the current service and any other applicable restrictions on the baggage | ||||
| // value. | ||||
| type RestrictionManager interface { | ||||
| 	GetRestriction(service, key string) *Restriction | ||||
| } | ||||
|  | ||||
| // DefaultRestrictionManager allows any baggage key. | ||||
| type DefaultRestrictionManager struct { | ||||
| 	defaultRestriction *Restriction | ||||
| } | ||||
|  | ||||
| // NewDefaultRestrictionManager returns a DefaultRestrictionManager. | ||||
| func NewDefaultRestrictionManager(maxValueLength int) *DefaultRestrictionManager { | ||||
| 	if maxValueLength == 0 { | ||||
| 		maxValueLength = defaultMaxValueLength | ||||
| 	} | ||||
| 	return &DefaultRestrictionManager{ | ||||
| 		defaultRestriction: &Restriction{keyAllowed: true, maxValueLength: maxValueLength}, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // GetRestriction implements RestrictionManager#GetRestriction. | ||||
| func (m *DefaultRestrictionManager) GetRestriction(service, key string) *Restriction { | ||||
| 	return m.defaultRestriction | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/spanlog/json.go (new file, 81 lines, generated, vendored)
| @@ -0,0 +1,81 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package spanlog | ||||
|  | ||||
| import ( | ||||
| 	"encoding/json" | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/opentracing/opentracing-go/log" | ||||
| ) | ||||
|  | ||||
| type fieldsAsMap map[string]string | ||||
|  | ||||
| // MaterializeWithJSON converts log Fields into JSON string | ||||
| // TODO refactor into pluggable materializer | ||||
| func MaterializeWithJSON(logFields []log.Field) ([]byte, error) { | ||||
| 	fields := fieldsAsMap(make(map[string]string, len(logFields))) | ||||
| 	for _, field := range logFields { | ||||
| 		field.Marshal(fields) | ||||
| 	} | ||||
| 	if event, ok := fields["event"]; ok && len(fields) == 1 { | ||||
| 		return []byte(event), nil | ||||
| 	} | ||||
| 	return json.Marshal(fields) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitString(key, value string) { | ||||
| 	ml[key] = value | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitBool(key string, value bool) { | ||||
| 	ml[key] = fmt.Sprintf("%t", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitInt(key string, value int) { | ||||
| 	ml[key] = fmt.Sprintf("%d", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitInt32(key string, value int32) { | ||||
| 	ml[key] = fmt.Sprintf("%d", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitInt64(key string, value int64) { | ||||
| 	ml[key] = fmt.Sprintf("%d", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitUint32(key string, value uint32) { | ||||
| 	ml[key] = fmt.Sprintf("%d", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitUint64(key string, value uint64) { | ||||
| 	ml[key] = fmt.Sprintf("%d", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitFloat32(key string, value float32) { | ||||
| 	ml[key] = fmt.Sprintf("%f", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitFloat64(key string, value float64) { | ||||
| 	ml[key] = fmt.Sprintf("%f", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitObject(key string, value interface{}) { | ||||
| 	ml[key] = fmt.Sprintf("%+v", value) | ||||
| } | ||||
|  | ||||
| func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) { | ||||
| 	value(ml) | ||||
| } | ||||
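MaterializeWithJSON stringifies every field value, and collapses a lone "event" field to its bare value. A small sketch using the public opentracing log helpers (log is github.com/opentracing/opentracing-go/log; spanlog is internal, so this only compiles from within the client tree):

	fields := []log.Field{
		log.String("event", "baggage"),
		log.Int("retries", 3),
	}
	data, err := spanlog.MaterializeWithJSON(fields)
	// err == nil; data is {"event":"baggage","retries":"3"} (note both values become strings)

	only := []log.Field{log.String("event", "timeout")}
	data, err = spanlog.MaterializeWithJSON(only)
	// data is the raw bytes "timeout", not JSON, because "event" is the only field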
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/options.go (new file, 99 lines, generated, vendored)
| @@ -0,0 +1,99 @@ | ||||
| // Copyright (c) 2018 The Jaeger Authors. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package remote | ||||
|  | ||||
| import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	defaultHostPort        = "localhost:5778" | ||||
| 	defaultRefreshInterval = time.Second * 5 | ||||
| ) | ||||
|  | ||||
| // Option is a function that sets some option on the Throttler | ||||
| type Option func(options *options) | ||||
|  | ||||
| // Options is a factory for all available options | ||||
| var Options options | ||||
|  | ||||
| type options struct { | ||||
| 	metrics                   *jaeger.Metrics | ||||
| 	logger                    jaeger.Logger | ||||
| 	hostPort                  string | ||||
| 	refreshInterval           time.Duration | ||||
| 	synchronousInitialization bool | ||||
| } | ||||
|  | ||||
| // Metrics creates an Option that initializes Metrics on the Throttler, which is used to emit statistics. | ||||
| func (options) Metrics(m *jaeger.Metrics) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.metrics = m | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // Logger creates an Option that sets the logger used by the Throttler. | ||||
| func (options) Logger(logger jaeger.Logger) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.logger = logger | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // HostPort creates an Option that sets the hostPort of the local agent that keeps track of credits. | ||||
| func (options) HostPort(hostPort string) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.hostPort = hostPort | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // RefreshInterval creates an Option that sets how often the Throttler will poll local agent for | ||||
| // credits. | ||||
| func (options) RefreshInterval(refreshInterval time.Duration) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.refreshInterval = refreshInterval | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // SynchronousInitialization creates an Option that determines whether the throttler should synchronously | ||||
| // fetch credits from the agent when an operation is seen for the first time. This should be set to true | ||||
| // if the client will be used by a short-lived service that needs to ensure that credits are fetched upfront | ||||
| // such that sampling or throttling occurs. | ||||
| func (options) SynchronousInitialization(b bool) Option { | ||||
| 	return func(o *options) { | ||||
| 		o.synchronousInitialization = b | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func applyOptions(o ...Option) options { | ||||
| 	opts := options{} | ||||
| 	for _, option := range o { | ||||
| 		option(&opts) | ||||
| 	} | ||||
| 	if opts.metrics == nil { | ||||
| 		opts.metrics = jaeger.NewNullMetrics() | ||||
| 	} | ||||
| 	if opts.logger == nil { | ||||
| 		opts.logger = jaeger.NullLogger | ||||
| 	} | ||||
| 	if opts.hostPort == "" { | ||||
| 		opts.hostPort = defaultHostPort | ||||
| 	} | ||||
| 	if opts.refreshInterval == 0 { | ||||
| 		opts.refreshInterval = defaultRefreshInterval | ||||
| 	} | ||||
| 	return opts | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/throttler/remote/throttler.go (new file, 216 lines, generated, vendored)
| @@ -0,0 +1,216 @@ | ||||
| // Copyright (c) 2018 The Jaeger Authors. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package remote | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
| 	"net/url" | ||||
| 	"sync" | ||||
| 	"sync/atomic" | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/pkg/errors" | ||||
|  | ||||
| 	"github.com/uber/jaeger-client-go" | ||||
| 	"github.com/uber/jaeger-client-go/utils" | ||||
| ) | ||||
|  | ||||
| const ( | ||||
| 	// minimumCredits is the minimum amount of credits necessary to not be throttled. | ||||
| 	// i.e. if currentCredits > minimumCredits, then the operation will not be throttled. | ||||
| 	minimumCredits = 1.0 | ||||
| ) | ||||
|  | ||||
| var ( | ||||
| 	errorUUIDNotSet = errors.New("Throttler UUID must be set") | ||||
| ) | ||||
|  | ||||
| type operationBalance struct { | ||||
| 	Operation string  `json:"operation"` | ||||
| 	Balance   float64 `json:"balance"` | ||||
| } | ||||
|  | ||||
| type creditResponse struct { | ||||
| 	Balances []operationBalance `json:"balances"` | ||||
| } | ||||
|  | ||||
| type httpCreditManagerProxy struct { | ||||
| 	hostPort string | ||||
| } | ||||
|  | ||||
| func newHTTPCreditManagerProxy(hostPort string) *httpCreditManagerProxy { | ||||
| 	return &httpCreditManagerProxy{ | ||||
| 		hostPort: hostPort, | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // N.B. Operations list must not be empty. | ||||
| func (m *httpCreditManagerProxy) FetchCredits(uuid, serviceName string, operations []string) (*creditResponse, error) { | ||||
| 	params := url.Values{} | ||||
| 	params.Set("service", serviceName) | ||||
| 	params.Set("uuid", uuid) | ||||
| 	for _, op := range operations { | ||||
| 		params.Add("operations", op) | ||||
| 	} | ||||
| 	var resp creditResponse | ||||
| 	if err := utils.GetJSON(fmt.Sprintf("http://%s/credits?%s", m.hostPort, params.Encode()), &resp); err != nil { | ||||
| 		return nil, errors.Wrap(err, "Failed to receive credits from agent") | ||||
| 	} | ||||
| 	return &resp, nil | ||||
| } | ||||
|  | ||||
| // Throttler retrieves credits from agent and uses it to throttle operations. | ||||
| type Throttler struct { | ||||
| 	options | ||||
|  | ||||
| 	mux           sync.RWMutex | ||||
| 	service       string | ||||
| 	uuid          atomic.Value | ||||
| 	creditManager *httpCreditManagerProxy | ||||
| 	credits       map[string]float64 // map of operation->credits | ||||
| 	close         chan struct{} | ||||
| 	stopped       sync.WaitGroup | ||||
| } | ||||
|  | ||||
| // NewThrottler returns a Throttler that polls agent for credits and uses them to throttle | ||||
| // the service. | ||||
| func NewThrottler(service string, options ...Option) *Throttler { | ||||
| 	opts := applyOptions(options...) | ||||
| 	creditManager := newHTTPCreditManagerProxy(opts.hostPort) | ||||
| 	t := &Throttler{ | ||||
| 		options:       opts, | ||||
| 		creditManager: creditManager, | ||||
| 		service:       service, | ||||
| 		credits:       make(map[string]float64), | ||||
| 		close:         make(chan struct{}), | ||||
| 	} | ||||
| 	t.stopped.Add(1) | ||||
| 	go t.pollManager() | ||||
| 	return t | ||||
| } | ||||
|  | ||||
| // IsAllowed implements Throttler#IsAllowed. | ||||
| func (t *Throttler) IsAllowed(operation string) bool { | ||||
| 	t.mux.Lock() | ||||
| 	defer t.mux.Unlock() | ||||
| 	value, ok := t.credits[operation] | ||||
| 	if !ok || value == 0 { | ||||
| 		if !ok { | ||||
| 			// NOTE: This appears to be a no-op at first glance, but it stores | ||||
| 			// the operation key in the map. Necessary for functionality of | ||||
| 			// Throttler#operations method. | ||||
| 			t.credits[operation] = 0 | ||||
| 		} | ||||
| 		if !t.synchronousInitialization { | ||||
| 			t.metrics.ThrottledDebugSpans.Inc(1) | ||||
| 			return false | ||||
| 		} | ||||
| 		// If it is the first time this operation is being checked, synchronously fetch | ||||
| 		// the credits. | ||||
| 		credits, err := t.fetchCredits([]string{operation}) | ||||
| 		if err != nil { | ||||
| 			// Failed to receive credits from agent, try again next time | ||||
| 			t.logger.Error("Failed to fetch credits: " + err.Error()) | ||||
| 			return false | ||||
| 		} | ||||
| 		if len(credits.Balances) == 0 { | ||||
| 			// This shouldn't happen but just in case | ||||
| 			return false | ||||
| 		} | ||||
| 		for _, opBalance := range credits.Balances { | ||||
| 			t.credits[opBalance.Operation] += opBalance.Balance | ||||
| 		} | ||||
| 	} | ||||
| 	return t.isAllowed(operation) | ||||
| } | ||||
|  | ||||
| // Close stops the throttler from fetching credits from remote. | ||||
| func (t *Throttler) Close() error { | ||||
| 	close(t.close) | ||||
| 	t.stopped.Wait() | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| // SetProcess implements ProcessSetter#SetProcess. It's imperative that the UUID is set before any remote | ||||
| // requests are made. | ||||
| func (t *Throttler) SetProcess(process jaeger.Process) { | ||||
| 	if process.UUID != "" { | ||||
| 		t.uuid.Store(process.UUID) | ||||
| 	} | ||||
| } | ||||
|  | ||||
| // N.B. This function must be called with the Write Lock | ||||
| func (t *Throttler) isAllowed(operation string) bool { | ||||
| 	credits := t.credits[operation] | ||||
| 	if credits < minimumCredits { | ||||
| 		t.metrics.ThrottledDebugSpans.Inc(1) | ||||
| 		return false | ||||
| 	} | ||||
| 	t.credits[operation] = credits - minimumCredits | ||||
| 	return true | ||||
| } | ||||
|  | ||||
| func (t *Throttler) pollManager() { | ||||
| 	defer t.stopped.Done() | ||||
| 	ticker := time.NewTicker(t.refreshInterval) | ||||
| 	defer ticker.Stop() | ||||
| 	for { | ||||
| 		select { | ||||
| 		case <-ticker.C: | ||||
| 			t.refreshCredits() | ||||
| 		case <-t.close: | ||||
| 			return | ||||
| 		} | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (t *Throttler) operations() []string { | ||||
| 	t.mux.RLock() | ||||
| 	defer t.mux.RUnlock() | ||||
| 	operations := make([]string, 0, len(t.credits)) | ||||
| 	for op := range t.credits { | ||||
| 		operations = append(operations, op) | ||||
| 	} | ||||
| 	return operations | ||||
| } | ||||
|  | ||||
| func (t *Throttler) refreshCredits() { | ||||
| 	operations := t.operations() | ||||
| 	if len(operations) == 0 { | ||||
| 		return | ||||
| 	} | ||||
| 	newCredits, err := t.fetchCredits(operations) | ||||
| 	if err != nil { | ||||
| 		t.metrics.ThrottlerUpdateFailure.Inc(1) | ||||
| 		t.logger.Error("Failed to fetch credits: " + err.Error()) | ||||
| 		return | ||||
| 	} | ||||
| 	t.metrics.ThrottlerUpdateSuccess.Inc(1) | ||||
|  | ||||
| 	t.mux.Lock() | ||||
| 	defer t.mux.Unlock() | ||||
| 	for _, opBalance := range newCredits.Balances { | ||||
| 		t.credits[opBalance.Operation] += opBalance.Balance | ||||
| 	} | ||||
| } | ||||
|  | ||||
| func (t *Throttler) fetchCredits(operations []string) (*creditResponse, error) { | ||||
| 	uuid := t.uuid.Load() | ||||
| 	uuidStr, _ := uuid.(string) | ||||
| 	if uuid == nil || uuidStr == "" { | ||||
| 		return nil, errorUUIDNotSet | ||||
| 	} | ||||
| 	return t.creditManager.FetchCredits(uuidStr, t.service, operations) | ||||
| } | ||||
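A sketch of how the client itself would use this throttler (again an internal package; the UUID and operation name are made up). SetProcess must supply a non-empty UUID before any credits can be fetched:

	t := remote.NewThrottler(
		"example-service",
		remote.Options.SynchronousInitialization(true),
		remote.Options.RefreshInterval(10*time.Second),
	)
	defer t.Close()

	// Without a UUID, fetchCredits returns errorUUIDNotSet and nothing is allowed.
	// Only the UUID field matters to the throttler.
	t.SetProcess(jaeger.Process{UUID: "0c5c05fe-7f69-4a23-8a16-2bc3e9a5a902"})

	if t.IsAllowed("retrieve-chunk") {
		// one credit was spent; the debug span may be started
	}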
							
								
								
									
vendor/github.com/uber/jaeger-client-go/internal/throttler/throttler.go (new file, 32 lines, generated, vendored)
| @@ -0,0 +1,32 @@ | ||||
| // Copyright (c) 2018 The Jaeger Authors. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package throttler | ||||
|  | ||||
| // Throttler is used to rate-limit operations. For example, given that debug spans | ||||
| // are always sampled, a throttler can be enabled per client to rate-limit the number | ||||
| // of debug spans a client can start. | ||||
| type Throttler interface { | ||||
| 	// IsAllowed determines whether the operation should be allowed and not be | ||||
| 	// throttled. | ||||
| 	IsAllowed(operation string) bool | ||||
| } | ||||
|  | ||||
| // DefaultThrottler doesn't throttle at all. | ||||
| type DefaultThrottler struct{} | ||||
|  | ||||
| // IsAllowed implements Throttler#IsAllowed. | ||||
| func (t DefaultThrottler) IsAllowed(operation string) bool { | ||||
| 	return true | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/interop.go (new file, 55 lines, generated, vendored)
| @@ -0,0 +1,55 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"github.com/opentracing/opentracing-go" | ||||
| ) | ||||
|  | ||||
| // TODO this file should not be needed after TChannel PR. | ||||
|  | ||||
| type formatKey int | ||||
|  | ||||
| // SpanContextFormat is a constant used as OpenTracing Format. | ||||
| // Requires *SpanContext as carrier. | ||||
| // This format is intended for interop with TChannel or other Zipkin-like tracers. | ||||
| const SpanContextFormat formatKey = iota | ||||
|  | ||||
| type jaegerTraceContextPropagator struct { | ||||
| 	tracer *Tracer | ||||
| } | ||||
|  | ||||
| func (p *jaegerTraceContextPropagator) Inject( | ||||
| 	ctx SpanContext, | ||||
| 	abstractCarrier interface{}, | ||||
| ) error { | ||||
| 	carrier, ok := abstractCarrier.(*SpanContext) | ||||
| 	if !ok { | ||||
| 		return opentracing.ErrInvalidCarrier | ||||
| 	} | ||||
|  | ||||
| 	carrier.CopyFrom(&ctx) | ||||
| 	return nil | ||||
| } | ||||
|  | ||||
| func (p *jaegerTraceContextPropagator) Extract(abstractCarrier interface{}) (SpanContext, error) { | ||||
| 	carrier, ok := abstractCarrier.(*SpanContext) | ||||
| 	if !ok { | ||||
| 		return emptyContext, opentracing.ErrInvalidCarrier | ||||
| 	} | ||||
| 	ctx := new(SpanContext) | ||||
| 	ctx.CopyFrom(carrier) | ||||
| 	return *ctx, nil | ||||
| } | ||||
							
								
								
									
vendor/github.com/uber/jaeger-client-go/jaeger_tag.go (new file, 84 lines, generated, vendored)
| @@ -0,0 +1,84 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"fmt" | ||||
|  | ||||
| 	"github.com/opentracing/opentracing-go/log" | ||||
|  | ||||
| 	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" | ||||
| ) | ||||
|  | ||||
| type tags []*j.Tag | ||||
|  | ||||
| // ConvertLogsToJaegerTags converts log Fields into jaeger tags. | ||||
| func ConvertLogsToJaegerTags(logFields []log.Field) []*j.Tag { | ||||
| 	fields := tags(make([]*j.Tag, 0, len(logFields))) | ||||
| 	for _, field := range logFields { | ||||
| 		field.Marshal(&fields) | ||||
| 	} | ||||
| 	return fields | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitString(key, value string) { | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &value}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitBool(key string, value bool) { | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_BOOL, VBool: &value}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitInt(key string, value int) { | ||||
| 	vLong := int64(value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitInt32(key string, value int32) { | ||||
| 	vLong := int64(value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitInt64(key string, value int64) { | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &value}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitUint32(key string, value uint32) { | ||||
| 	vLong := int64(value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitUint64(key string, value uint64) { | ||||
| 	vLong := int64(value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_LONG, VLong: &vLong}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitFloat32(key string, value float32) { | ||||
| 	vDouble := float64(value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &vDouble}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitFloat64(key string, value float64) { | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_DOUBLE, VDouble: &value}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitObject(key string, value interface{}) { | ||||
| 	vStr := fmt.Sprintf("%+v", value) | ||||
| 	*t = append(*t, &j.Tag{Key: key, VType: j.TagType_STRING, VStr: &vStr}) | ||||
| } | ||||
|  | ||||
| func (t *tags) EmitLazyLogger(value log.LazyLogger) { | ||||
| 	value(t) | ||||
| } | ||||
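A short, hedged illustration of what these emitters produce: converting opentracing log fields into the generated thrift tags. The field names and values are made up for the example.

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	// Each log.Field is marshalled through the tags emitter above, so strings,
	// ints and bools land in the matching thrift TagType.
	fields := []log.Field{
		log.String("event", "chunk.delivery"),
		log.Int("size", 4096),
		log.Bool("cached", false),
	}
	for _, tag := range jaeger.ConvertLogsToJaegerTags(fields) {
		fmt.Println(tag.Key, tag.VType)
	}
}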
							
								
								
									
179 vendor/github.com/uber/jaeger-client-go/jaeger_thrift_span.go (generated, vendored, new file)
									
								
							| @@ -0,0 +1,179 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"time" | ||||
|  | ||||
| 	"github.com/opentracing/opentracing-go" | ||||
|  | ||||
| 	j "github.com/uber/jaeger-client-go/thrift-gen/jaeger" | ||||
| 	"github.com/uber/jaeger-client-go/utils" | ||||
| ) | ||||
|  | ||||
| // BuildJaegerThrift builds jaeger span based on internal span. | ||||
| func BuildJaegerThrift(span *Span) *j.Span { | ||||
| 	span.Lock() | ||||
| 	defer span.Unlock() | ||||
| 	startTime := utils.TimeToMicrosecondsSinceEpochInt64(span.startTime) | ||||
| 	duration := span.duration.Nanoseconds() / int64(time.Microsecond) | ||||
| 	jaegerSpan := &j.Span{ | ||||
| 		TraceIdLow:    int64(span.context.traceID.Low), | ||||
| 		TraceIdHigh:   int64(span.context.traceID.High), | ||||
| 		SpanId:        int64(span.context.spanID), | ||||
| 		ParentSpanId:  int64(span.context.parentID), | ||||
| 		OperationName: span.operationName, | ||||
| 		Flags:         int32(span.context.flags), | ||||
| 		StartTime:     startTime, | ||||
| 		Duration:      duration, | ||||
| 		Tags:          buildTags(span.tags, span.tracer.options.maxTagValueLength), | ||||
| 		Logs:          buildLogs(span.logs), | ||||
| 		References:    buildReferences(span.references), | ||||
| 	} | ||||
| 	return jaegerSpan | ||||
| } | ||||
|  | ||||
| // BuildJaegerProcessThrift creates a thrift Process type. | ||||
| func BuildJaegerProcessThrift(span *Span) *j.Process { | ||||
| 	span.Lock() | ||||
| 	defer span.Unlock() | ||||
| 	return buildJaegerProcessThrift(span.tracer) | ||||
| } | ||||
|  | ||||
| func buildJaegerProcessThrift(tracer *Tracer) *j.Process { | ||||
| 	process := &j.Process{ | ||||
| 		ServiceName: tracer.serviceName, | ||||
| 		Tags:        buildTags(tracer.tags, tracer.options.maxTagValueLength), | ||||
| 	} | ||||
| 	if tracer.process.UUID != "" { | ||||
| 		process.Tags = append(process.Tags, &j.Tag{Key: TracerUUIDTagKey, VStr: &tracer.process.UUID, VType: j.TagType_STRING}) | ||||
| 	} | ||||
| 	return process | ||||
| } | ||||
|  | ||||
| func buildTags(tags []Tag, maxTagValueLength int) []*j.Tag { | ||||
| 	jTags := make([]*j.Tag, 0, len(tags)) | ||||
| 	for _, tag := range tags { | ||||
| 		jTag := buildTag(&tag, maxTagValueLength) | ||||
| 		jTags = append(jTags, jTag) | ||||
| 	} | ||||
| 	return jTags | ||||
| } | ||||
|  | ||||
| func buildLogs(logs []opentracing.LogRecord) []*j.Log { | ||||
| 	jLogs := make([]*j.Log, 0, len(logs)) | ||||
| 	for _, log := range logs { | ||||
| 		jLog := &j.Log{ | ||||
| 			Timestamp: utils.TimeToMicrosecondsSinceEpochInt64(log.Timestamp), | ||||
| 			Fields:    ConvertLogsToJaegerTags(log.Fields), | ||||
| 		} | ||||
| 		jLogs = append(jLogs, jLog) | ||||
| 	} | ||||
| 	return jLogs | ||||
| } | ||||
|  | ||||
| func buildTag(tag *Tag, maxTagValueLength int) *j.Tag { | ||||
| 	jTag := &j.Tag{Key: tag.key} | ||||
| 	switch value := tag.value.(type) { | ||||
| 	case string: | ||||
| 		vStr := truncateString(value, maxTagValueLength) | ||||
| 		jTag.VStr = &vStr | ||||
| 		jTag.VType = j.TagType_STRING | ||||
| 	case []byte: | ||||
| 		if len(value) > maxTagValueLength { | ||||
| 			value = value[:maxTagValueLength] | ||||
| 		} | ||||
| 		jTag.VBinary = value | ||||
| 		jTag.VType = j.TagType_BINARY | ||||
| 	case int: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case uint: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case int8: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case uint8: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case int16: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case uint16: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case int32: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case uint32: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case int64: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case uint64: | ||||
| 		vLong := int64(value) | ||||
| 		jTag.VLong = &vLong | ||||
| 		jTag.VType = j.TagType_LONG | ||||
| 	case float32: | ||||
| 		vDouble := float64(value) | ||||
| 		jTag.VDouble = &vDouble | ||||
| 		jTag.VType = j.TagType_DOUBLE | ||||
| 	case float64: | ||||
| 		vDouble := float64(value) | ||||
| 		jTag.VDouble = &vDouble | ||||
| 		jTag.VType = j.TagType_DOUBLE | ||||
| 	case bool: | ||||
| 		vBool := value | ||||
| 		jTag.VBool = &vBool | ||||
| 		jTag.VType = j.TagType_BOOL | ||||
| 	default: | ||||
| 		vStr := truncateString(stringify(value), maxTagValueLength) | ||||
| 		jTag.VStr = &vStr | ||||
| 		jTag.VType = j.TagType_STRING | ||||
| 	} | ||||
| 	return jTag | ||||
| } | ||||
|  | ||||
| func buildReferences(references []Reference) []*j.SpanRef { | ||||
| 	retMe := make([]*j.SpanRef, 0, len(references)) | ||||
| 	for _, ref := range references { | ||||
| 		if ref.Type == opentracing.ChildOfRef { | ||||
| 			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_CHILD_OF)) | ||||
| 		} else if ref.Type == opentracing.FollowsFromRef { | ||||
| 			retMe = append(retMe, spanRef(ref.Context, j.SpanRefType_FOLLOWS_FROM)) | ||||
| 		} | ||||
| 	} | ||||
| 	return retMe | ||||
| } | ||||
|  | ||||
| func spanRef(ctx SpanContext, refType j.SpanRefType) *j.SpanRef { | ||||
| 	return &j.SpanRef{ | ||||
| 		RefType:     refType, | ||||
| 		TraceIdLow:  int64(ctx.traceID.Low), | ||||
| 		TraceIdHigh: int64(ctx.traceID.High), | ||||
| 		SpanId:      int64(ctx.spanID), | ||||
| 	} | ||||
| } | ||||
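As a rough sketch (not part of this diff): the conversion above is what the reporter uses when shipping spans, but it can also be driven by hand after asserting the concrete *jaeger.Span. The tracer setup and tag are illustrative.

package main

import (
	"fmt"

	opentracing "github.com/opentracing/opentracing-go"
	jaeger "github.com/uber/jaeger-client-go"
)

func main() {
	tracer, closer := jaeger.NewTracer("example", jaeger.NewConstSampler(true), jaeger.NewInMemoryReporter())
	defer closer.Close()

	span := tracer.StartSpan("build-thrift", opentracing.Tag{Key: "component", Value: "sketch"})
	span.Finish()

	// BuildJaegerThrift expects the concrete span type, so assert the interface.
	jSpan, ok := span.(*jaeger.Span)
	if !ok {
		return
	}
	tSpan := jaeger.BuildJaegerThrift(jSpan)
	proc := jaeger.BuildJaegerProcessThrift(jSpan)
	fmt.Println(tSpan.OperationName, len(tSpan.Tags), proc.ServiceName)
}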
							
								
								
									
90 vendor/github.com/uber/jaeger-client-go/log/logger.go (generated, vendored, new file)
									
								
							| @@ -0,0 +1,90 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package log | ||||
|  | ||||
| import ( | ||||
| 	"bytes" | ||||
| 	"fmt" | ||||
| 	"log" | ||||
| 	"sync" | ||||
| ) | ||||
|  | ||||
| // Logger provides an abstract interface for logging from Reporters. | ||||
| // Applications can provide their own implementation of this interface to adapt | ||||
| // reporters logging to whatever logging library they prefer (stdlib log, | ||||
| // logrus, go-logging, etc). | ||||
| type Logger interface { | ||||
| 	// Error logs a message at error priority | ||||
| 	Error(msg string) | ||||
|  | ||||
| 	// Infof logs a message at info priority | ||||
| 	Infof(msg string, args ...interface{}) | ||||
| } | ||||
|  | ||||
| // StdLogger is implementation of the Logger interface that delegates to default `log` package | ||||
| var StdLogger = &stdLogger{} | ||||
|  | ||||
| type stdLogger struct{} | ||||
|  | ||||
| func (l *stdLogger) Error(msg string) { | ||||
| 	log.Printf("ERROR: %s", msg) | ||||
| } | ||||
|  | ||||
| // Infof logs a message at info priority | ||||
| func (l *stdLogger) Infof(msg string, args ...interface{}) { | ||||
| 	log.Printf(msg, args...) | ||||
| } | ||||
|  | ||||
| // NullLogger is implementation of the Logger interface that is no-op | ||||
| var NullLogger = &nullLogger{} | ||||
|  | ||||
| type nullLogger struct{} | ||||
|  | ||||
| func (l *nullLogger) Error(msg string)                      {} | ||||
| func (l *nullLogger) Infof(msg string, args ...interface{}) {} | ||||
|  | ||||
| // BytesBufferLogger implements Logger backed by a bytes.Buffer. | ||||
| type BytesBufferLogger struct { | ||||
| 	mux sync.Mutex | ||||
| 	buf bytes.Buffer | ||||
| } | ||||
|  | ||||
| // Error implements Logger. | ||||
| func (l *BytesBufferLogger) Error(msg string) { | ||||
| 	l.mux.Lock() | ||||
| 	l.buf.WriteString(fmt.Sprintf("ERROR: %s\n", msg)) | ||||
| 	l.mux.Unlock() | ||||
| } | ||||
|  | ||||
| // Infof implements Logger. | ||||
| func (l *BytesBufferLogger) Infof(msg string, args ...interface{}) { | ||||
| 	l.mux.Lock() | ||||
| 	l.buf.WriteString("INFO: " + fmt.Sprintf(msg, args...) + "\n") | ||||
| 	l.mux.Unlock() | ||||
| } | ||||
|  | ||||
| // String returns string representation of the underlying buffer. | ||||
| func (l *BytesBufferLogger) String() string { | ||||
| 	l.mux.Lock() | ||||
| 	defer l.mux.Unlock() | ||||
| 	return l.buf.String() | ||||
| } | ||||
|  | ||||
| // Flush empties the underlying buffer. | ||||
| func (l *BytesBufferLogger) Flush() { | ||||
| 	l.mux.Lock() | ||||
| 	defer l.mux.Unlock() | ||||
| 	l.buf.Reset() | ||||
| } | ||||
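A brief usage sketch, assuming the buffer-backed logger is wanted in a test to inspect reporter output instead of printing it:

package main

import (
	"fmt"

	jaegerlog "github.com/uber/jaeger-client-go/log"
)

func main() {
	var buf jaegerlog.BytesBufferLogger

	// The reporter would normally call these; they are invoked directly here.
	buf.Infof("flushing %d spans to %s", 12, "127.0.0.1:6831")
	buf.Error("agent unreachable")

	fmt.Print(buf.String()) // "INFO: ..." followed by "ERROR: agent unreachable"
	buf.Flush()
}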
							
								
								
									
53 vendor/github.com/uber/jaeger-client-go/logger.go (generated, vendored, new file)
									
								
							| @@ -0,0 +1,53 @@ | ||||
| // Copyright (c) 2017 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import "log" | ||||
|  | ||||
| // NB This will be deprecated in 3.0.0, please use jaeger-client-go/log/logger instead. | ||||
|  | ||||
| // Logger provides an abstract interface for logging from Reporters. | ||||
| // Applications can provide their own implementation of this interface to adapt | ||||
| // reporters logging to whatever logging library they prefer (stdlib log, | ||||
| // logrus, go-logging, etc). | ||||
| type Logger interface { | ||||
| 	// Error logs a message at error priority | ||||
| 	Error(msg string) | ||||
|  | ||||
| 	// Infof logs a message at info priority | ||||
| 	Infof(msg string, args ...interface{}) | ||||
| } | ||||
|  | ||||
| // StdLogger is implementation of the Logger interface that delegates to default `log` package | ||||
| var StdLogger = &stdLogger{} | ||||
|  | ||||
| type stdLogger struct{} | ||||
|  | ||||
| func (l *stdLogger) Error(msg string) { | ||||
| 	log.Printf("ERROR: %s", msg) | ||||
| } | ||||
|  | ||||
| // Infof logs a message at info priority | ||||
| func (l *stdLogger) Infof(msg string, args ...interface{}) { | ||||
| 	log.Printf(msg, args...) | ||||
| } | ||||
|  | ||||
| // NullLogger is implementation of the Logger interface that is no-op | ||||
| var NullLogger = &nullLogger{} | ||||
|  | ||||
| type nullLogger struct{} | ||||
|  | ||||
| func (l *nullLogger) Error(msg string)                      {} | ||||
| func (l *nullLogger) Infof(msg string, args ...interface{}) {} | ||||
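Because this package-level Logger is the interface jaeger's tracer options accept, a caller such as swarm/tracing could in principle forward it to go-ethereum's logger. The adapter below is a hypothetical sketch (type name and package are invented), not code from this change:

package tracing

import (
	"fmt"

	"github.com/ethereum/go-ethereum/log"
)

// gethJaegerLogger is a hypothetical adapter satisfying jaeger's Logger
// interface (Error(string), Infof(string, ...interface{})) on top of the
// go-ethereum log package.
type gethJaegerLogger struct{}

func (gethJaegerLogger) Error(msg string) {
	log.Error(msg)
}

func (gethJaegerLogger) Infof(msg string, args ...interface{}) {
	log.Info(fmt.Sprintf(msg, args...))
}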
							
								
								
									
107 vendor/github.com/uber/jaeger-client-go/metrics.go (generated, vendored, new file)
									
								
							| @@ -0,0 +1,107 @@ | ||||
| // Copyright (c) 2017-2018 Uber Technologies, Inc. | ||||
| // | ||||
| // Licensed under the Apache License, Version 2.0 (the "License"); | ||||
| // you may not use this file except in compliance with the License. | ||||
| // You may obtain a copy of the License at | ||||
| // | ||||
| // http://www.apache.org/licenses/LICENSE-2.0 | ||||
| // | ||||
| // Unless required by applicable law or agreed to in writing, software | ||||
| // distributed under the License is distributed on an "AS IS" BASIS, | ||||
| // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. | ||||
| // See the License for the specific language governing permissions and | ||||
| // limitations under the License. | ||||
|  | ||||
| package jaeger | ||||
|  | ||||
| import ( | ||||
| 	"github.com/uber/jaeger-lib/metrics" | ||||
| ) | ||||
|  | ||||
| // Metrics is a container of all stats emitted by Jaeger tracer. | ||||
| type Metrics struct { | ||||
| 	// Number of traces started by this tracer as sampled | ||||
| 	TracesStartedSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=y"` | ||||
|  | ||||
| 	// Number of traces started by this tracer as not sampled | ||||
| 	TracesStartedNotSampled metrics.Counter `metric:"traces" tags:"state=started,sampled=n"` | ||||
|  | ||||
| 	// Number of externally started sampled traces this tracer joined | ||||
| 	TracesJoinedSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=y"` | ||||
|  | ||||
| 	// Number of externally started not-sampled traces this tracer joined | ||||
| 	TracesJoinedNotSampled metrics.Counter `metric:"traces" tags:"state=joined,sampled=n"` | ||||
|  | ||||
| 	// Number of sampled spans started by this tracer | ||||
| 	SpansStartedSampled metrics.Counter `metric:"started_spans" tags:"sampled=y"` | ||||
|  | ||||
| 	// Number of unsampled spans started by this tracer | ||||
| 	SpansStartedNotSampled metrics.Counter `metric:"started_spans" tags:"sampled=n"` | ||||
|  | ||||
| 	// Number of spans finished by this tracer | ||||
| 	SpansFinished metrics.Counter `metric:"finished_spans"` | ||||
|  | ||||
| 	// Number of errors decoding tracing context | ||||
| 	DecodingErrors metrics.Counter `metric:"span_context_decoding_errors"` | ||||
|  | ||||
| 	// Number of spans successfully reported | ||||
| 	ReporterSuccess metrics.Counter `metric:"reporter_spans" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of spans not reported due to a Sender failure | ||||
| 	ReporterFailure metrics.Counter `metric:"reporter_spans" tags:"result=err"` | ||||
|  | ||||
| 	// Number of spans dropped due to internal queue overflow | ||||
| 	ReporterDropped metrics.Counter `metric:"reporter_spans" tags:"result=dropped"` | ||||
|  | ||||
| 	// Current number of spans in the reporter queue | ||||
| 	ReporterQueueLength metrics.Gauge `metric:"reporter_queue_length"` | ||||
|  | ||||
| 	// Number of times the Sampler succeeded to retrieve sampling strategy | ||||
| 	SamplerRetrieved metrics.Counter `metric:"sampler_queries" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of times the Sampler failed to retrieve sampling strategy | ||||
| 	SamplerQueryFailure metrics.Counter `metric:"sampler_queries" tags:"result=err"` | ||||
|  | ||||
| 	// Number of times the Sampler succeeded to retrieve and update sampling strategy | ||||
| 	SamplerUpdated metrics.Counter `metric:"sampler_updates" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of times the Sampler failed to update sampling strategy | ||||
| 	SamplerUpdateFailure metrics.Counter `metric:"sampler_updates" tags:"result=err"` | ||||
|  | ||||
| 	// Number of times baggage was successfully written or updated on spans. | ||||
| 	BaggageUpdateSuccess metrics.Counter `metric:"baggage_updates" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of times baggage failed to write or update on spans. | ||||
| 	BaggageUpdateFailure metrics.Counter `metric:"baggage_updates" tags:"result=err"` | ||||
|  | ||||
| 	// Number of times baggage was truncated as per baggage restrictions. | ||||
| 	BaggageTruncate metrics.Counter `metric:"baggage_truncations"` | ||||
|  | ||||
| 	// Number of times baggage restrictions were successfully updated. | ||||
| 	BaggageRestrictionsUpdateSuccess metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of times baggage restrictions failed to update. | ||||
| 	BaggageRestrictionsUpdateFailure metrics.Counter `metric:"baggage_restrictions_updates" tags:"result=err"` | ||||
|  | ||||
| 	// Number of times debug spans were throttled. | ||||
| 	ThrottledDebugSpans metrics.Counter `metric:"throttled_debug_spans"` | ||||
|  | ||||
| 	// Number of times throttler successfully updated. | ||||
| 	ThrottlerUpdateSuccess metrics.Counter `metric:"throttler_updates" tags:"result=ok"` | ||||
|  | ||||
| 	// Number of times throttler failed to update. | ||||
| 	ThrottlerUpdateFailure metrics.Counter `metric:"throttler_updates" tags:"result=err"` | ||||
| } | ||||
|  | ||||
| // NewMetrics creates a new Metrics struct and initializes it. | ||||
| func NewMetrics(factory metrics.Factory, globalTags map[string]string) *Metrics { | ||||
| 	m := &Metrics{} | ||||
| 	// TODO the namespace "jaeger" should be configurable (e.g. in all-in-one "jaeger-client" would make more sense) | ||||
| 	metrics.Init(m, factory.Namespace("jaeger", nil), globalTags) | ||||
| 	return m | ||||
| } | ||||
|  | ||||
| // NewNullMetrics creates a new Metrics struct that won't report any metrics. | ||||
| func NewNullMetrics() *Metrics { | ||||
| 	return NewMetrics(metrics.NullFactory, nil) | ||||
| } | ||||
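A hedged sketch of initializing this struct; the null factory keeps the example dependency-free, whereas a real deployment would pass a jaeger-lib factory backed by its metrics system (the tag value is illustrative). If memory serves, jaeger.TracerOptions.Metrics is the option that attaches the result to a tracer, though that lives outside this file.

package main

import (
	"fmt"

	jaeger "github.com/uber/jaeger-client-go"
	"github.com/uber/jaeger-lib/metrics"
)

func main() {
	// metrics.Init fills every counter/gauge field from its struct tags.
	m := jaeger.NewMetrics(metrics.NullFactory, map[string]string{"host": "node-1"})
	m.TracesStartedSampled.Inc(1)

	// Shortcut when metrics should be discarded entirely.
	_ = jaeger.NewNullMetrics()

	fmt.Println("metrics initialized")
}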
Some files were not shown because too many files have changed in this diff.