Mirror of https://github.com/containous/traefik.git (synced 2024-12-22 13:34:03 +03:00)

Instana tracer implementation

This commit is contained in:
parent c2c6aee18a
commit aef24dd74b

28 Gopkg.lock generated
@@ -1022,6 +1022,14 @@
revision = "2d474a3089bcfce6b472779be9470a1f0ef3d5e4"
version = "v1.3.7"

[[projects]]
digest = "1:78efd72f12ed0244e5fbe82bd0ecdbaf3e21402ee9176525ef1138a2fc0d3b17"
name = "github.com/instana/go-sensor"
packages = ["."]
pruneopts = "NUT"
revision = "493edb42228321483dd4d59ade71ca93fa89d66b"
version = "1.4.12"

[[projects]]
digest = "1:ac6d01547ec4f7f673311b4663909269bfb8249952de3279799289467837c3cc"
name = "github.com/jmespath/go-jmespath"
@@ -1111,6 +1119,14 @@
revision = "d0d31d8ca62fa3f7e4526ca0ce95de81e4ed001e"
version = "v0.5.1"

[[projects]]
digest = "1:8520e78cbe9878f6bf0cfdcfaed5761cd575b3568c260a1c891ac1f5c5c6a726"
name = "github.com/looplab/fsm"
packages = ["."]
pruneopts = "NUT"
revision = "84b5307469f859464403f80919467950a79de1b1"
version = "v0.1.0"

[[projects]]
digest = "1:196b0d7580e898df15a7cc5371cbfe2b8e22904f5c6c883ed5db0130e551c8fb"
name = "github.com/mailgun/minheap"
@@ -1327,6 +1343,17 @@
pruneopts = "NUT"
revision = "a52f2342449246d5bcc273e65cbdcfa5f7d6c63c"

[[projects]]
digest = "1:cc405544fecfb5a8e0c409127ef67ce3b91d11143a00121e5b822e4f8eabe7d2"
name = "github.com/opentracing/basictracer-go"
packages = [
".",
"wire",
]
pruneopts = "NUT"
revision = "1b32af207119a14b1b231d451df3ed04a72efebf"
version = "v1.0.0"

[[projects]]
digest = "1:7da29c22bcc5c2ffb308324377dc00b5084650348c2799e573ed226d8cc9faf0"
name = "github.com/opentracing/opentracing-go"
@@ -2326,6 +2353,7 @@
"github.com/hashicorp/consul/api",
"github.com/hashicorp/go-version",
"github.com/influxdata/influxdb/client/v2",
"github.com/instana/go-sensor",
"github.com/libkermit/compose/check",
"github.com/libkermit/docker",
"github.com/libkermit/docker-check",
@@ -255,3 +255,7 @@
[[constraint]]
name = "gopkg.in/DataDog/dd-trace-go.v1"
version = "1.7.0"

[[constraint]]
name = "github.com/instana/go-sensor"
version = "1.4.12"
@@ -24,6 +24,7 @@ import (
"github.com/containous/traefik/provider/marathon"
"github.com/containous/traefik/provider/rest"
"github.com/containous/traefik/tracing/datadog"
"github.com/containous/traefik/tracing/instana"
"github.com/containous/traefik/tracing/jaeger"
"github.com/containous/traefik/tracing/zipkin"
"github.com/containous/traefik/types"
@@ -114,6 +115,11 @@ func NewTraefikDefaultPointersConfiguration() *TraefikConfiguration {
Debug: false,
PrioritySampling: false,
},
Instana: &instana.Config{
LocalAgentHost: "localhost",
LocalAgentPort: 42699,
LogLevel: "info",
},
}

// default ApiConfiguration
@@ -27,6 +27,7 @@ import (
"github.com/containous/traefik/provider/rest"
"github.com/containous/traefik/tls"
"github.com/containous/traefik/tracing/datadog"
"github.com/containous/traefik/tracing/instana"
"github.com/containous/traefik/tracing/jaeger"
"github.com/containous/traefik/tracing/zipkin"
"github.com/containous/traefik/types"
@@ -117,12 +118,13 @@ type LifeCycle struct {

// Tracing holds the tracing configuration.
type Tracing struct {
Backend string `description:"Selects the tracking backend ('jaeger','zipkin', 'datadog')." export:"true"`
Backend string `description:"Selects the tracking backend ('jaeger','zipkin','datadog','instana')." export:"true"`
ServiceName string `description:"Set the name for this service" export:"true"`
SpanNameLimit int `description:"Set the maximum character limit for Span names (default 0 = no limit)" export:"true"`
Jaeger *jaeger.Config `description:"Settings for jaeger"`
Zipkin *zipkin.Config `description:"Settings for zipkin"`
DataDog *datadog.Config `description:"Settings for DataDog"`
Instana *instana.Config `description:"Settings for Instana"`
}

// Providers contains providers configuration
@@ -244,6 +246,10 @@ func (c *Configuration) initTracing() {
log.Warn("DataDog configuration will be ignored")
c.Tracing.DataDog = nil
}
if c.Tracing.Instana != nil {
log.Warn("Instana configuration will be ignored")
c.Tracing.Instana = nil
}
case zipkin.Name:
if c.Tracing.Zipkin == nil {
c.Tracing.Zipkin = &zipkin.Config{
@@ -262,6 +268,10 @@ func (c *Configuration) initTracing() {
log.Warn("DataDog configuration will be ignored")
c.Tracing.DataDog = nil
}
if c.Tracing.Instana != nil {
log.Warn("Instana configuration will be ignored")
c.Tracing.Instana = nil
}
case datadog.Name:
if c.Tracing.DataDog == nil {
c.Tracing.DataDog = &datadog.Config{
@@ -278,6 +288,30 @@ func (c *Configuration) initTracing() {
log.Warn("Jaeger configuration will be ignored")
c.Tracing.Jaeger = nil
}
if c.Tracing.Instana != nil {
log.Warn("Instana configuration will be ignored")
c.Tracing.Instana = nil
}
case instana.Name:
if c.Tracing.Instana == nil {
c.Tracing.Instana = &instana.Config{
LocalAgentHost: "localhost",
LocalAgentPort: 42699,
LogLevel: "info",
}
}
if c.Tracing.Zipkin != nil {
log.Warn("Zipkin configuration will be ignored")
c.Tracing.Zipkin = nil
}
if c.Tracing.Jaeger != nil {
log.Warn("Jaeger configuration will be ignored")
c.Tracing.Jaeger = nil
}
if c.Tracing.DataDog != nil {
log.Warn("DataDog configuration will be ignored")
c.Tracing.DataDog = nil
}
default:
log.Warnf("Unknown tracer %q", c.Tracing.Backend)
return
@@ -4,7 +4,7 @@ The tracing system allows developers to visualize call flows in their infrastruc

We use [OpenTracing](http://opentracing.io). It is an open standard designed for distributed tracing.

Traefik supports three tracing backends: Jaeger, Zipkin and DataDog.
Traefik supports four tracing backends: Jaeger, Zipkin, DataDog, and Instana.

## Jaeger

@@ -188,3 +188,41 @@ Traefik supports three tracing backends: Jaeger, Zipkin and DataDog.
#
prioritySampling = false
```

## Instana

```toml
# Tracing definition
[tracing]
# Backend name used to send tracing data
#
# Default: "jaeger"
#
backend = "instana"
# Service name used in Instana backend
#
# Default: "traefik"
#
serviceName = "traefik"
[tracing.instana]
# Local Agent Host instructs reporter to send spans to instana-agent at this address
#
# Default: "127.0.0.1"
#
localAgentHost = "127.0.0.1"
# Local Agent port instructs reporter to send spans to the instana-agent at this port
#
# Default: 42699
#
localAgentPort = 42699
# Set Instana tracer log level
#
# Default: info
# Valid values for logLevel field are:
# - error
# - warn
# - debug
# - info
#
logLevel = "info"
```
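The `[tracing.instana]` keys above map directly onto the options that the new Instana tracer passes to the vendored `github.com/instana/go-sensor` library (see `tracing/instana/instana.go` later in this diff). A minimal standalone sketch of that mapping, using only APIs that appear in this commit; the span name is illustrative:

```go
package main

import (
	instana "github.com/instana/go-sensor"
	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	// Mirrors serviceName, localAgentHost, localAgentPort and logLevel above;
	// "info" corresponds to the instana.Info log level constant.
	tracer := instana.NewTracerWithOptions(&instana.Options{
		Service:   "traefik",
		AgentHost: "127.0.0.1",
		AgentPort: 42699,
		LogLevel:  instana.Info,
	})

	// Traefik also registers the tracer globally so that child spans
	// created through the opentracing package reach the Instana agent.
	opentracing.SetGlobalTracer(tracer)

	span := opentracing.GlobalTracer().StartSpan("demo-operation")
	defer span.Finish()
}
```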
@@ -21,6 +21,7 @@ import (
"github.com/containous/traefik/server/middleware"
"github.com/containous/traefik/tracing"
"github.com/containous/traefik/tracing/datadog"
"github.com/containous/traefik/tracing/instana"
"github.com/containous/traefik/tracing/jaeger"
"github.com/containous/traefik/tracing/zipkin"
"github.com/containous/traefik/types"
@@ -60,6 +61,8 @@ func setupTracing(conf *static.Tracing) tracing.TrackingBackend {
return conf.Zipkin
case datadog.Name:
return conf.DataDog
case instana.Name:
return conf.Instana
default:
log.WithoutContext().Warnf("Could not initialize tracing: unknown tracer %q", conf.Backend)
return nil
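setupTracing only selects which backend configuration to return; the caller is then expected to build the tracer from it. The tracing.TrackingBackend interface itself is not part of this diff, so the sketch below uses a local stand-in that is assumed to match the Setup(serviceName) signature implemented by instana.Config in the next file:

```go
package main

import (
	"io"

	"github.com/containous/traefik/tracing/instana"
	opentracing "github.com/opentracing/opentracing-go"
)

// trackingBackend is a local stand-in for tracing.TrackingBackend; the real
// interface is assumed to expose the same Setup method as instana.Config.
type trackingBackend interface {
	Setup(serviceName string) (opentracing.Tracer, io.Closer, error)
}

func main() {
	// In Traefik this value would come from setupTracing(conf).
	var backend trackingBackend = &instana.Config{
		LocalAgentHost: "localhost",
		LocalAgentPort: 42699,
		LogLevel:       "info",
	}

	tracer, closer, err := backend.Setup("traefik")
	if err != nil {
		panic(err)
	}
	if closer != nil { // nil for the Instana backend
		defer closer.Close()
	}

	span := tracer.StartSpan("setup-check") // illustrative operation name
	span.Finish()
}
```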
49 tracing/instana/instana.go Normal file
@@ -0,0 +1,49 @@
package instana

import (
"io"

"github.com/containous/traefik/log"
"github.com/instana/go-sensor"
"github.com/opentracing/opentracing-go"
)

// Name sets the name of this tracer
const Name = "instana"

// Config provides configuration settings for an Instana tracer
type Config struct {
LocalAgentHost string `description:"Set instana-agent's host that the reporter will use." export:"false"`
LocalAgentPort int `description:"Set instana-agent's port that the reporter will use." export:"false"`
LogLevel string `description:"Set instana-agent's log level. ('error','warn','info','debug')" export:"false"`
}

// Setup sets up the tracer
func (c *Config) Setup(serviceName string) (opentracing.Tracer, io.Closer, error) {
// set default logLevel
logLevel := instana.Info

// check/set logLevel overrides
switch c.LogLevel {
case "error":
logLevel = instana.Error
case "warn":
logLevel = instana.Warn
case "debug":
logLevel = instana.Debug
}

tracer := instana.NewTracerWithOptions(&instana.Options{
Service: serviceName,
LogLevel: logLevel,
AgentPort: c.LocalAgentPort,
AgentHost: c.LocalAgentHost,
})

// Without this, child spans are getting the NOOP tracer
opentracing.SetGlobalTracer(tracer)

log.WithoutContext().Debug("Instana tracer configured")

return tracer, nil, nil
}
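The SetGlobalTracer call above is what lets code that only imports the opentracing package create real child spans instead of no-op ones. A small, self-contained illustration of that behaviour; function and span names are illustrative and not taken from Traefik:

```go
package main

import (
	"github.com/containous/traefik/tracing/instana"
	opentracing "github.com/opentracing/opentracing-go"
)

func main() {
	cfg := &instana.Config{LocalAgentHost: "localhost", LocalAgentPort: 42699, LogLevel: "info"}

	// Setup builds the go-sensor tracer and registers it globally;
	// the returned io.Closer is always nil for this backend.
	if _, _, err := cfg.Setup("traefik"); err != nil {
		panic(err)
	}

	handleRequest()
}

// handleRequest knows nothing about Instana: it relies on the global tracer,
// which is why Setup must call opentracing.SetGlobalTracer.
func handleRequest() {
	parent := opentracing.GlobalTracer().StartSpan("request")
	defer parent.Finish()

	child := opentracing.GlobalTracer().StartSpan("backend-call", opentracing.ChildOf(parent.Context()))
	defer child.Finish()
}
```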
21
vendor/github.com/instana/go-sensor/LICENSE.md
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2016 Instana
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
183
vendor/github.com/instana/go-sensor/agent.go
generated
vendored
Normal file
@ -0,0 +1,183 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
agentDiscoveryURL = "/com.instana.plugin.golang.discovery"
|
||||
agentTracesURL = "/com.instana.plugin.golang/traces."
|
||||
agentDataURL = "/com.instana.plugin.golang."
|
||||
agentEventURL = "/com.instana.plugin.generic.event"
|
||||
agentDefaultHost = "localhost"
|
||||
agentDefaultPort = 42699
|
||||
agentHeader = "Instana Agent"
|
||||
)
|
||||
|
||||
type agentResponse struct {
|
||||
Pid uint32 `json:"pid"`
|
||||
HostID string `json:"agentUuid"`
|
||||
}
|
||||
|
||||
type discoveryS struct {
|
||||
PID int `json:"pid"`
|
||||
Name string `json:"name"`
|
||||
Args []string `json:"args"`
|
||||
Fd string `json:"fd"`
|
||||
Inode string `json:"inode"`
|
||||
}
|
||||
|
||||
type fromS struct {
|
||||
PID string `json:"e"`
|
||||
HostID string `json:"h"`
|
||||
}
|
||||
|
||||
type agentS struct {
|
||||
sensor *sensorS
|
||||
fsm *fsmS
|
||||
from *fromS
|
||||
host string
|
||||
client *http.Client
|
||||
}
|
||||
|
||||
func (r *agentS) init() {
|
||||
r.client = &http.Client{Timeout: 5 * time.Second}
|
||||
r.fsm = r.initFsm()
|
||||
r.setFrom(&fromS{})
|
||||
}
|
||||
|
||||
func (r *agentS) makeURL(prefix string) string {
|
||||
return r.makeHostURL(r.host, prefix)
|
||||
}
|
||||
|
||||
func (r *agentS) makeHostURL(host string, prefix string) string {
|
||||
envPort := os.Getenv("INSTANA_AGENT_PORT")
|
||||
port := agentDefaultPort
|
||||
if r.sensor.options.AgentPort > 0 {
|
||||
return r.makeFullURL(host, r.sensor.options.AgentPort, prefix)
|
||||
}
|
||||
if envPort == "" {
|
||||
return r.makeFullURL(host, port, prefix)
|
||||
}
|
||||
port, err := strconv.Atoi(envPort)
|
||||
if err != nil {
|
||||
return r.makeFullURL(host, agentDefaultPort, prefix)
|
||||
}
|
||||
return r.makeFullURL(host, port, prefix)
|
||||
}
|
||||
|
||||
func (r *agentS) makeFullURL(host string, port int, prefix string) string {
|
||||
var buffer bytes.Buffer
|
||||
|
||||
buffer.WriteString("http://")
|
||||
buffer.WriteString(host)
|
||||
buffer.WriteString(":")
|
||||
buffer.WriteString(strconv.Itoa(port))
|
||||
buffer.WriteString(prefix)
|
||||
if prefix[len(prefix)-1:] == "." && r.from.PID != "" {
|
||||
buffer.WriteString(r.from.PID)
|
||||
}
|
||||
|
||||
return buffer.String()
|
||||
}
|
||||
|
||||
func (r *agentS) head(url string) (string, error) {
|
||||
return r.request(url, "HEAD", nil)
|
||||
}
|
||||
|
||||
func (r *agentS) request(url string, method string, data interface{}) (string, error) {
|
||||
return r.fullRequestResponse(url, method, data, nil, "")
|
||||
}
|
||||
|
||||
func (r *agentS) requestResponse(url string, method string, data interface{}, ret interface{}) (string, error) {
|
||||
return r.fullRequestResponse(url, method, data, ret, "")
|
||||
}
|
||||
|
||||
func (r *agentS) requestHeader(url string, method string, header string) (string, error) {
|
||||
return r.fullRequestResponse(url, method, nil, nil, header)
|
||||
}
|
||||
|
||||
func (r *agentS) fullRequestResponse(url string, method string, data interface{}, body interface{}, header string) (string, error) {
|
||||
var j []byte
|
||||
var ret string
|
||||
var err error
|
||||
var resp *http.Response
|
||||
var req *http.Request
|
||||
if data != nil {
|
||||
j, err = json.Marshal(data)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
if j != nil {
|
||||
req, err = http.NewRequest(method, url, bytes.NewBuffer(j))
|
||||
} else {
|
||||
req, err = http.NewRequest(method, url, nil)
|
||||
}
|
||||
|
||||
if err == nil {
|
||||
req.Header.Set("Content-Type", "application/json")
|
||||
resp, err = r.client.Do(req)
|
||||
if err == nil {
|
||||
defer resp.Body.Close()
|
||||
|
||||
if resp.StatusCode < 200 || resp.StatusCode >= 300 {
|
||||
err = errors.New(resp.Status)
|
||||
} else {
|
||||
|
||||
log.debug("agent response:", url, resp.Status)
|
||||
|
||||
if body != nil {
|
||||
var b []byte
|
||||
b, err = ioutil.ReadAll(resp.Body)
|
||||
json.Unmarshal(b, body)
|
||||
}
|
||||
|
||||
if header != "" {
|
||||
ret = resp.Header.Get(header)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
// Ignore errors while in announced state (before ready) as
|
||||
// this is the time where the entity is registering in the Instana
|
||||
// backend and it will return 404 until it's done.
|
||||
if !r.sensor.agent.fsm.fsm.Is("announced") {
|
||||
log.info(err, url)
|
||||
}
|
||||
}
|
||||
|
||||
return ret, err
|
||||
}
|
||||
|
||||
func (r *agentS) setFrom(from *fromS) {
|
||||
r.from = from
|
||||
}
|
||||
|
||||
func (r *agentS) setHost(host string) {
|
||||
r.host = host
|
||||
}
|
||||
|
||||
func (r *agentS) reset() {
|
||||
r.fsm.reset()
|
||||
}
|
||||
|
||||
func (r *sensorS) initAgent() *agentS {
|
||||
|
||||
log.debug("initializing agent")
|
||||
|
||||
ret := new(agentS)
|
||||
ret.sensor = r
|
||||
ret.init()
|
||||
|
||||
return ret
|
||||
}
|
42
vendor/github.com/instana/go-sensor/context.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package instana
|
||||
|
||||
// SpanContext holds the basic Span metadata.
|
||||
type SpanContext struct {
|
||||
// A probabilistically unique identifier for a [multi-span] trace.
|
||||
TraceID int64
|
||||
|
||||
// A probabilistically unique identifier for a span.
|
||||
SpanID int64
|
||||
|
||||
// Whether the trace is sampled.
|
||||
Sampled bool
|
||||
|
||||
// The span's associated baggage.
|
||||
Baggage map[string]string // initialized on first use
|
||||
}
|
||||
|
||||
// ForeachBaggageItem belongs to the opentracing.SpanContext interface
|
||||
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
|
||||
for k, v := range c.Baggage {
|
||||
if !handler(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithBaggageItem returns an entirely new SpanContext with the
|
||||
// given key:value baggage pair set.
|
||||
func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
|
||||
var newBaggage map[string]string
|
||||
if c.Baggage == nil {
|
||||
newBaggage = map[string]string{key: val}
|
||||
} else {
|
||||
newBaggage = make(map[string]string, len(c.Baggage)+1)
|
||||
for k, v := range c.Baggage {
|
||||
newBaggage[k] = v
|
||||
}
|
||||
newBaggage[key] = val
|
||||
}
|
||||
// Use positional parameters so the compiler will help catch new fields.
|
||||
return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage}
|
||||
}
|
37
vendor/github.com/instana/go-sensor/eum.go
generated
vendored
Normal file
@ -0,0 +1,37 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"io/ioutil"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const eumTemplate string = "eum.js"
|
||||
|
||||
// EumSnippet generates javascript code to initialize JavaScript agent
|
||||
func EumSnippet(apiKey string, traceID string, meta map[string]string) string {
|
||||
|
||||
if len(apiKey) == 0 || len(traceID) == 0 {
|
||||
return ""
|
||||
}
|
||||
|
||||
b, err := ioutil.ReadFile(eumTemplate)
|
||||
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
var snippet = string(b)
|
||||
var metaBuffer bytes.Buffer
|
||||
|
||||
snippet = strings.Replace(snippet, "$apiKey", apiKey, -1)
|
||||
snippet = strings.Replace(snippet, "$traceId", traceID, -1)
|
||||
|
||||
for key, value := range meta {
|
||||
metaBuffer.WriteString(" ineum('meta', '" + key + "', '" + value + "');\n")
|
||||
}
|
||||
|
||||
snippet = strings.Replace(snippet, "$meta", metaBuffer.String(), -1)
|
||||
|
||||
return snippet
|
||||
}
|
78
vendor/github.com/instana/go-sensor/event.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"time"
|
||||
)
|
||||
|
||||
// EventData is the construct serialized for the host agent
|
||||
type EventData struct {
|
||||
Title string `json:"title"`
|
||||
Text string `json:"text"`
|
||||
// Duration in milliseconds
|
||||
Duration int `json:"duration"`
|
||||
// Severity with value of -1, 5, 10 : see type severity
|
||||
Severity int `json:"severity"`
|
||||
Plugin string `json:"plugin,omitempty"`
|
||||
ID string `json:"id,omitempty"`
|
||||
Host string `json:"host"`
|
||||
}
|
||||
|
||||
type severity int
|
||||
|
||||
//Severity values for events sent to the instana agent
|
||||
const (
|
||||
SeverityChange severity = -1
|
||||
SeverityWarning severity = 5
|
||||
SeverityCritical severity = 10
|
||||
)
|
||||
|
||||
// Defaults for the Event API
|
||||
const (
|
||||
ServicePlugin = "com.instana.forge.connection.http.logical.LogicalWebApp"
|
||||
ServiceHost = ""
|
||||
)
|
||||
|
||||
// SendDefaultServiceEvent sends a default event which already contains the service and host
|
||||
func SendDefaultServiceEvent(title string, text string, sev severity, duration time.Duration) {
|
||||
if sensor == nil {
|
||||
// Since no sensor was initialized, there is no default service (as
|
||||
// configured on the sensor) so we send blank.
|
||||
SendServiceEvent("", title, text, sev, duration)
|
||||
} else {
|
||||
SendServiceEvent(sensor.serviceName, title, text, sev, duration)
|
||||
}
|
||||
}
|
||||
|
||||
// SendServiceEvent send an event on a specific service
|
||||
func SendServiceEvent(service string, title string, text string, sev severity, duration time.Duration) {
|
||||
sendEvent(&EventData{
|
||||
Title: title,
|
||||
Text: text,
|
||||
Severity: int(sev),
|
||||
Plugin: ServicePlugin,
|
||||
ID: service,
|
||||
Host: ServiceHost,
|
||||
Duration: int(duration / time.Millisecond),
|
||||
})
|
||||
}
|
||||
|
||||
// SendHostEvent send an event on the current host
|
||||
func SendHostEvent(title string, text string, sev severity, duration time.Duration) {
|
||||
sendEvent(&EventData{
|
||||
Title: title,
|
||||
Text: text,
|
||||
Duration: int(duration / time.Millisecond),
|
||||
Severity: int(sev),
|
||||
})
|
||||
}
|
||||
|
||||
func sendEvent(event *EventData) {
|
||||
if sensor == nil {
|
||||
// If the sensor hasn't initialized we do so here so that we properly
|
||||
// discover where the host agent may be as it varies between a
|
||||
// normal host, docker, kubernetes etc..
|
||||
InitSensor(&Options{})
|
||||
}
|
||||
//we do fire & forget here, because the whole pid dance isn't necessary to send events
|
||||
go sensor.agent.request(sensor.agent.makeURL(agentEventURL), "POST", event)
|
||||
}
|
242
vendor/github.com/instana/go-sensor/fsm.go
generated
vendored
Normal file
@ -0,0 +1,242 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"net"
|
||||
"os"
|
||||
"os/exec"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
f "github.com/looplab/fsm"
|
||||
)
|
||||
|
||||
const (
|
||||
eInit = "init"
|
||||
eLookup = "lookup"
|
||||
eAnnounce = "announce"
|
||||
eTest = "test"
|
||||
|
||||
retryPeriod = 30 * 1000
|
||||
maximumRetries = 2
|
||||
)
|
||||
|
||||
type fsmS struct {
|
||||
agent *agentS
|
||||
fsm *f.FSM
|
||||
timer *time.Timer
|
||||
retries int
|
||||
}
|
||||
|
||||
func (r *fsmS) init() {
|
||||
|
||||
log.warn("Stan is on the scene. Starting Instana instrumentation.")
|
||||
log.debug("initializing fsm")
|
||||
|
||||
r.fsm = f.NewFSM(
|
||||
"none",
|
||||
f.Events{
|
||||
{Name: eInit, Src: []string{"none", "unannounced", "announced", "ready"}, Dst: "init"},
|
||||
{Name: eLookup, Src: []string{"init"}, Dst: "unannounced"},
|
||||
{Name: eAnnounce, Src: []string{"unannounced"}, Dst: "announced"},
|
||||
{Name: eTest, Src: []string{"announced"}, Dst: "ready"}},
|
||||
f.Callbacks{
|
||||
"init": r.lookupAgentHost,
|
||||
"enter_unannounced": r.announceSensor,
|
||||
"enter_announced": r.testAgent})
|
||||
|
||||
r.retries = maximumRetries
|
||||
r.fsm.Event(eInit)
|
||||
}
|
||||
|
||||
func (r *fsmS) scheduleRetry(e *f.Event, cb func(e *f.Event)) {
|
||||
r.timer = time.NewTimer(retryPeriod * time.Millisecond)
|
||||
go func() {
|
||||
<-r.timer.C
|
||||
cb(e)
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *fsmS) lookupAgentHost(e *f.Event) {
|
||||
cb := func(b bool, host string) {
|
||||
if b {
|
||||
r.lookupSuccess(host)
|
||||
} else {
|
||||
gateway := r.getDefaultGateway()
|
||||
if gateway != "" {
|
||||
go r.checkHost(gateway, func(b bool, host string) {
|
||||
if b {
|
||||
r.lookupSuccess(host)
|
||||
} else {
|
||||
log.error("Cannot connect to the agent through localhost or default gateway. Scheduling retry.")
|
||||
r.scheduleRetry(e, r.lookupAgentHost)
|
||||
}
|
||||
})
|
||||
} else {
|
||||
log.error("Default gateway not available. Scheduling retry")
|
||||
r.scheduleRetry(e, r.lookupAgentHost)
|
||||
}
|
||||
}
|
||||
}
|
||||
hostNames := []string{
|
||||
r.agent.sensor.options.AgentHost,
|
||||
os.Getenv("INSTANA_AGENT_HOST"),
|
||||
agentDefaultHost,
|
||||
}
|
||||
for _, name := range hostNames {
|
||||
if name != "" {
|
||||
go r.checkHost(name, cb)
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *fsmS) getDefaultGateway() string {
|
||||
out, _ := exec.Command("/bin/sh", "-c", "/sbin/ip route | awk '/default/' | cut -d ' ' -f 3 | tr -d '\n'").Output()
|
||||
|
||||
log.debug("checking default gateway", string(out[:]))
|
||||
|
||||
return string(out[:])
|
||||
}
|
||||
|
||||
func (r *fsmS) checkHost(host string, cb func(b bool, host string)) {
|
||||
log.debug("checking host", host)
|
||||
|
||||
header, err := r.agent.requestHeader(r.agent.makeHostURL(host, "/"), "GET", "Server")
|
||||
|
||||
cb(err == nil && header == agentHeader, host)
|
||||
}
|
||||
|
||||
func (r *fsmS) lookupSuccess(host string) {
|
||||
log.debug("agent lookup success", host)
|
||||
|
||||
r.agent.setHost(host)
|
||||
r.retries = maximumRetries
|
||||
r.fsm.Event(eLookup)
|
||||
}
|
||||
|
||||
func (r *fsmS) announceSensor(e *f.Event) {
|
||||
cb := func(b bool, from *fromS) {
|
||||
if b {
|
||||
log.info("Host agent available. We're in business. Announced pid:", from.PID)
|
||||
r.agent.setFrom(from)
|
||||
r.retries = maximumRetries
|
||||
r.fsm.Event(eAnnounce)
|
||||
} else {
|
||||
log.error("Cannot announce sensor. Scheduling retry.")
|
||||
r.retries--
|
||||
if r.retries > 0 {
|
||||
r.scheduleRetry(e, r.announceSensor)
|
||||
} else {
|
||||
r.fsm.Event(eInit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("announcing sensor to the agent")
|
||||
|
||||
go func(cb func(b bool, from *fromS)) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
log.debug("Announce recovered:", r)
|
||||
}
|
||||
}()
|
||||
|
||||
pid := 0
|
||||
schedFile := fmt.Sprintf("/proc/%d/sched", os.Getpid())
|
||||
if _, err := os.Stat(schedFile); err == nil {
|
||||
sf, err := os.Open(schedFile)
|
||||
defer sf.Close()
|
||||
if err == nil {
|
||||
fscanner := bufio.NewScanner(sf)
|
||||
fscanner.Scan()
|
||||
primaLinea := fscanner.Text()
|
||||
|
||||
r := regexp.MustCompile("\\((\\d+),")
|
||||
match := r.FindStringSubmatch(primaLinea)
|
||||
i, err := strconv.Atoi(match[1])
|
||||
if err == nil {
|
||||
pid = i
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if pid == 0 {
|
||||
pid = os.Getpid()
|
||||
}
|
||||
|
||||
d := &discoveryS{PID: pid}
|
||||
d.Name, d.Args = getCommandLine()
|
||||
|
||||
if _, err := os.Stat("/proc"); err == nil {
|
||||
if addr, err := net.ResolveTCPAddr("tcp", r.agent.host+":42699"); err == nil {
|
||||
if tcpConn, err := net.DialTCP("tcp", nil, addr); err == nil {
|
||||
defer tcpConn.Close()
|
||||
|
||||
f, err := tcpConn.File()
|
||||
|
||||
if err != nil {
|
||||
log.error(err)
|
||||
} else {
|
||||
d.Fd = fmt.Sprintf("%v", f.Fd())
|
||||
|
||||
link := fmt.Sprintf("/proc/%d/fd/%d", os.Getpid(), f.Fd())
|
||||
if _, err := os.Stat(link); err == nil {
|
||||
d.Inode, _ = os.Readlink(link)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ret := &agentResponse{}
|
||||
_, err := r.agent.requestResponse(r.agent.makeURL(agentDiscoveryURL), "PUT", d, ret)
|
||||
cb(err == nil,
|
||||
&fromS{
|
||||
PID: strconv.Itoa(int(ret.Pid)),
|
||||
HostID: ret.HostID})
|
||||
}(cb)
|
||||
}
|
||||
|
||||
func (r *fsmS) testAgent(e *f.Event) {
|
||||
cb := func(b bool) {
|
||||
if b {
|
||||
r.retries = maximumRetries
|
||||
r.fsm.Event(eTest)
|
||||
} else {
|
||||
log.debug("Agent is not yet ready. Scheduling retry.")
|
||||
r.retries--
|
||||
if r.retries > 0 {
|
||||
r.scheduleRetry(e, r.testAgent)
|
||||
} else {
|
||||
r.fsm.Event(eInit)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
log.debug("testing communication with the agent")
|
||||
|
||||
go func(cb func(b bool)) {
|
||||
_, err := r.agent.head(r.agent.makeURL(agentDataURL))
|
||||
cb(err == nil)
|
||||
}(cb)
|
||||
}
|
||||
|
||||
func (r *fsmS) reset() {
|
||||
r.retries = maximumRetries
|
||||
r.fsm.Event(eInit)
|
||||
}
|
||||
|
||||
func (r *agentS) initFsm() *fsmS {
|
||||
ret := new(fsmS)
|
||||
ret.agent = r
|
||||
ret.init()
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (r *agentS) canSend() bool {
|
||||
return r.fsm.fsm.Current() == "ready"
|
||||
}
|
38
vendor/github.com/instana/go-sensor/json_span.go
generated
vendored
Normal file
@ -0,0 +1,38 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
type jsonSpan struct {
|
||||
TraceID int64 `json:"t"`
|
||||
ParentID *int64 `json:"p,omitempty"`
|
||||
SpanID int64 `json:"s"`
|
||||
Timestamp uint64 `json:"ts"`
|
||||
Duration uint64 `json:"d"`
|
||||
Name string `json:"n"`
|
||||
From *fromS `json:"f"`
|
||||
Error bool `json:"error"`
|
||||
Ec int `json:"ec,omitempty"`
|
||||
Lang string `json:"ta,omitempty"`
|
||||
Data *jsonData `json:"data"`
|
||||
}
|
||||
|
||||
type jsonData struct {
|
||||
Service string `json:"service,omitempty"`
|
||||
SDK *jsonSDKData `json:"sdk"`
|
||||
}
|
||||
|
||||
type jsonCustomData struct {
|
||||
Tags ot.Tags `json:"tags,omitempty"`
|
||||
Logs map[uint64]map[string]interface{} `json:"logs,omitempty"`
|
||||
Baggage map[string]string `json:"baggage,omitempty"`
|
||||
}
|
||||
|
||||
type jsonSDKData struct {
|
||||
Name string `json:"name"`
|
||||
Type string `json:"type,omitempty"`
|
||||
Arguments string `json:"arguments,omitempty"`
|
||||
Return string `json:"return,omitempty"`
|
||||
Custom *jsonCustomData `json:"custom,omitempty"`
|
||||
}
|
52
vendor/github.com/instana/go-sensor/log.go
generated
vendored
Normal file
@ -0,0 +1,52 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
l "log"
|
||||
)
|
||||
|
||||
// Valid log levels
|
||||
const (
|
||||
Error = 0
|
||||
Warn = 1
|
||||
Info = 2
|
||||
Debug = 3
|
||||
)
|
||||
|
||||
type logS struct {
|
||||
sensor *sensorS
|
||||
}
|
||||
|
||||
var log *logS
|
||||
|
||||
func (r *logS) makeV(prefix string, v ...interface{}) []interface{} {
|
||||
return append([]interface{}{prefix}, v...)
|
||||
}
|
||||
|
||||
func (r *logS) debug(v ...interface{}) {
|
||||
if r.sensor.options.LogLevel >= Debug {
|
||||
l.Println(r.makeV("DEBUG: instana:", v...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *logS) info(v ...interface{}) {
|
||||
if r.sensor.options.LogLevel >= Info {
|
||||
l.Println(r.makeV("INFO: instana:", v...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *logS) warn(v ...interface{}) {
|
||||
if r.sensor.options.LogLevel >= Warn {
|
||||
l.Println(r.makeV("WARN: instana:", v...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *logS) error(v ...interface{}) {
|
||||
if r.sensor.options.LogLevel >= Error {
|
||||
l.Println(r.makeV("ERROR: instana:", v...)...)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *sensorS) initLog() {
|
||||
log = new(logS)
|
||||
log.sensor = r
|
||||
}
|
157
vendor/github.com/instana/go-sensor/meter.go
generated
vendored
Normal file
@ -0,0 +1,157 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"runtime"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
// SnapshotPeriod is the amount of time in seconds between snapshot reports.
|
||||
SnapshotPeriod = 600
|
||||
)
|
||||
|
||||
// SnapshotS struct to hold snapshot data.
|
||||
type SnapshotS struct {
|
||||
Name string `json:"name"`
|
||||
Version string `json:"version"`
|
||||
Root string `json:"goroot"`
|
||||
MaxProcs int `json:"maxprocs"`
|
||||
Compiler string `json:"compiler"`
|
||||
NumCPU int `json:"cpu"`
|
||||
}
|
||||
|
||||
// MemoryS struct to hold snapshot data.
|
||||
type MemoryS struct {
|
||||
Alloc uint64 `json:"alloc"`
|
||||
TotalAlloc uint64 `json:"total_alloc"`
|
||||
Sys uint64 `json:"sys"`
|
||||
Lookups uint64 `json:"lookups"`
|
||||
Mallocs uint64 `json:"mallocs"`
|
||||
Frees uint64 `json:"frees"`
|
||||
HeapAlloc uint64 `json:"heap_alloc"`
|
||||
HeapSys uint64 `json:"heap_sys"`
|
||||
HeapIdle uint64 `json:"heap_idle"`
|
||||
HeapInuse uint64 `json:"heap_in_use"`
|
||||
HeapReleased uint64 `json:"heap_released"`
|
||||
HeapObjects uint64 `json:"heap_objects"`
|
||||
PauseTotalNs uint64 `json:"pause_total_ns"`
|
||||
PauseNs uint64 `json:"pause_ns"`
|
||||
NumGC uint32 `json:"num_gc"`
|
||||
GCCPUFraction float64 `json:"gc_cpu_fraction"`
|
||||
}
|
||||
|
||||
// MetricsS struct to hold snapshot data.
|
||||
type MetricsS struct {
|
||||
CgoCall int64 `json:"cgo_call"`
|
||||
Goroutine int `json:"goroutine"`
|
||||
Memory *MemoryS `json:"memory"`
|
||||
}
|
||||
|
||||
// EntityData struct to hold snapshot data.
|
||||
type EntityData struct {
|
||||
PID int `json:"pid"`
|
||||
Snapshot *SnapshotS `json:"snapshot,omitempty"`
|
||||
Metrics *MetricsS `json:"metrics"`
|
||||
}
|
||||
|
||||
type meterS struct {
|
||||
sensor *sensorS
|
||||
numGC uint32
|
||||
ticker *time.Ticker
|
||||
snapshotCountdown int
|
||||
}
|
||||
|
||||
func (r *meterS) init() {
|
||||
r.ticker = time.NewTicker(1 * time.Second)
|
||||
go func() {
|
||||
r.snapshotCountdown = 1
|
||||
for range r.ticker.C {
|
||||
if r.sensor.agent.canSend() {
|
||||
r.snapshotCountdown--
|
||||
var s *SnapshotS
|
||||
if r.snapshotCountdown == 0 {
|
||||
r.snapshotCountdown = SnapshotPeriod
|
||||
s = r.collectSnapshot()
|
||||
log.debug("collected snapshot")
|
||||
} else {
|
||||
s = nil
|
||||
}
|
||||
|
||||
pid, _ := strconv.Atoi(r.sensor.agent.from.PID)
|
||||
d := &EntityData{
|
||||
PID: pid,
|
||||
Snapshot: s,
|
||||
Metrics: r.collectMetrics()}
|
||||
|
||||
go r.send(d)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
func (r *meterS) send(d *EntityData) {
|
||||
_, err := r.sensor.agent.request(r.sensor.agent.makeURL(agentDataURL), "POST", d)
|
||||
|
||||
if err != nil {
|
||||
r.sensor.agent.reset()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *meterS) collectMemoryMetrics() *MemoryS {
|
||||
var memStats runtime.MemStats
|
||||
runtime.ReadMemStats(&memStats)
|
||||
ret := &MemoryS{
|
||||
Alloc: memStats.Alloc,
|
||||
TotalAlloc: memStats.TotalAlloc,
|
||||
Sys: memStats.Sys,
|
||||
Lookups: memStats.Lookups,
|
||||
Mallocs: memStats.Mallocs,
|
||||
Frees: memStats.Frees,
|
||||
HeapAlloc: memStats.HeapAlloc,
|
||||
HeapSys: memStats.HeapSys,
|
||||
HeapIdle: memStats.HeapIdle,
|
||||
HeapInuse: memStats.HeapInuse,
|
||||
HeapReleased: memStats.HeapReleased,
|
||||
HeapObjects: memStats.HeapObjects,
|
||||
PauseTotalNs: memStats.PauseTotalNs,
|
||||
NumGC: memStats.NumGC,
|
||||
GCCPUFraction: memStats.GCCPUFraction}
|
||||
|
||||
if r.numGC < memStats.NumGC {
|
||||
ret.PauseNs = memStats.PauseNs[(memStats.NumGC+255)%256]
|
||||
r.numGC = memStats.NumGC
|
||||
} else {
|
||||
ret.PauseNs = 0
|
||||
}
|
||||
|
||||
return ret
|
||||
}
|
||||
|
||||
func (r *meterS) collectMetrics() *MetricsS {
|
||||
return &MetricsS{
|
||||
CgoCall: runtime.NumCgoCall(),
|
||||
Goroutine: runtime.NumGoroutine(),
|
||||
Memory: r.collectMemoryMetrics()}
|
||||
}
|
||||
|
||||
func (r *meterS) collectSnapshot() *SnapshotS {
|
||||
return &SnapshotS{
|
||||
Name: r.sensor.serviceName,
|
||||
Version: runtime.Version(),
|
||||
Root: runtime.GOROOT(),
|
||||
MaxProcs: runtime.GOMAXPROCS(0),
|
||||
Compiler: runtime.Compiler,
|
||||
NumCPU: runtime.NumCPU()}
|
||||
}
|
||||
|
||||
func (r *sensorS) initMeter() *meterS {
|
||||
|
||||
log.debug("initializing meter")
|
||||
|
||||
ret := new(meterS)
|
||||
ret.sensor = r
|
||||
ret.init()
|
||||
|
||||
return ret
|
||||
}
|
12
vendor/github.com/instana/go-sensor/options.go
generated
vendored
Normal file
@ -0,0 +1,12 @@
|
||||
package instana
|
||||
|
||||
// Options allows the user to configure the to-be-initialized
|
||||
// sensor
|
||||
type Options struct {
|
||||
Service string
|
||||
AgentHost string
|
||||
AgentPort int
|
||||
MaxBufferedSpans int
|
||||
ForceTransmissionStartingAt int
|
||||
LogLevel int
|
||||
}
|
165
vendor/github.com/instana/go-sensor/propagation.go
generated
vendored
Normal file
@ -0,0 +1,165 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
type textMapPropagator struct {
|
||||
tracer *tracerS
|
||||
}
|
||||
|
||||
// Instana header constants
|
||||
const (
|
||||
// FieldT Trace ID header
|
||||
FieldT = "x-instana-t"
|
||||
// FieldS Span ID header
|
||||
FieldS = "x-instana-s"
|
||||
// FieldL Level header
|
||||
FieldL = "x-instana-l"
|
||||
// FieldB OT Baggage header
|
||||
FieldB = "x-instana-b-"
|
||||
fieldCount = 2
|
||||
)
|
||||
|
||||
func (r *textMapPropagator) inject(spanContext ot.SpanContext, opaqueCarrier interface{}) error {
|
||||
sc, ok := spanContext.(SpanContext)
|
||||
if !ok {
|
||||
return ot.ErrInvalidSpanContext
|
||||
}
|
||||
|
||||
roCarrier, ok := opaqueCarrier.(ot.TextMapReader)
|
||||
if !ok {
|
||||
return ot.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
// Handle pre-existing case-sensitive keys
|
||||
var (
|
||||
exstfieldT = FieldT
|
||||
exstfieldS = FieldS
|
||||
exstfieldL = FieldL
|
||||
exstfieldB = FieldB
|
||||
)
|
||||
|
||||
roCarrier.ForeachKey(func(k, v string) error {
|
||||
switch strings.ToLower(k) {
|
||||
case FieldT:
|
||||
exstfieldT = k
|
||||
case FieldS:
|
||||
exstfieldS = k
|
||||
case FieldL:
|
||||
exstfieldL = k
|
||||
default:
|
||||
if strings.HasPrefix(strings.ToLower(k), FieldB) {
|
||||
exstfieldB = string([]rune(k)[0:len(FieldB)])
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
|
||||
carrier, ok := opaqueCarrier.(ot.TextMapWriter)
|
||||
if !ok {
|
||||
return ot.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
hhcarrier, ok := opaqueCarrier.(ot.HTTPHeadersCarrier)
|
||||
if ok {
|
||||
// If http.Headers has pre-existing keys, calling Set() like we do
|
||||
// below will just append to those existing values and break context
|
||||
// propagation. So defend against that case, we delete any pre-existing
|
||||
// keys entirely first.
|
||||
y := http.Header(hhcarrier)
|
||||
y.Del(exstfieldT)
|
||||
y.Del(exstfieldS)
|
||||
y.Del(exstfieldL)
|
||||
|
||||
for key := range y {
|
||||
if strings.HasPrefix(strings.ToLower(key), FieldB) {
|
||||
y.Del(key)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if instanaTID, err := ID2Header(sc.TraceID); err == nil {
|
||||
carrier.Set(exstfieldT, instanaTID)
|
||||
} else {
|
||||
log.debug(err)
|
||||
}
|
||||
if instanaSID, err := ID2Header(sc.SpanID); err == nil {
|
||||
carrier.Set(exstfieldS, instanaSID)
|
||||
} else {
|
||||
log.debug(err)
|
||||
}
|
||||
carrier.Set(exstfieldL, strconv.Itoa(1))
|
||||
|
||||
for k, v := range sc.Baggage {
|
||||
carrier.Set(exstfieldB+k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (r *textMapPropagator) extract(opaqueCarrier interface{}) (ot.SpanContext, error) {
|
||||
carrier, ok := opaqueCarrier.(ot.TextMapReader)
|
||||
if !ok {
|
||||
return nil, ot.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
fieldCount := 0
|
||||
var traceID, spanID int64
|
||||
var err error
|
||||
baggage := make(map[string]string)
|
||||
err = carrier.ForeachKey(func(k, v string) error {
|
||||
switch strings.ToLower(k) {
|
||||
case FieldT:
|
||||
fieldCount++
|
||||
traceID, err = Header2ID(v)
|
||||
if err != nil {
|
||||
return ot.ErrSpanContextCorrupted
|
||||
}
|
||||
case FieldS:
|
||||
fieldCount++
|
||||
spanID, err = Header2ID(v)
|
||||
if err != nil {
|
||||
return ot.ErrSpanContextCorrupted
|
||||
}
|
||||
default:
|
||||
lk := strings.ToLower(k)
|
||||
|
||||
if strings.HasPrefix(lk, FieldB) {
|
||||
baggage[strings.TrimPrefix(lk, FieldB)] = v
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return r.finishExtract(err, fieldCount, traceID, spanID, baggage)
|
||||
}
|
||||
|
||||
func (r *textMapPropagator) finishExtract(err error,
|
||||
fieldCount int,
|
||||
traceID int64,
|
||||
spanID int64,
|
||||
baggage map[string]string) (ot.SpanContext, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if fieldCount < 2 {
|
||||
if fieldCount == 0 {
|
||||
return nil, ot.ErrSpanContextNotFound
|
||||
}
|
||||
|
||||
return nil, ot.ErrSpanContextCorrupted
|
||||
}
|
||||
|
||||
return SpanContext{
|
||||
TraceID: traceID,
|
||||
SpanID: spanID,
|
||||
Sampled: false,
|
||||
Baggage: baggage,
|
||||
}, nil
|
||||
}
|
175
vendor/github.com/instana/go-sensor/recorder.go
generated
vendored
Normal file
@ -0,0 +1,175 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// A SpanRecorder handles all of the `RawSpan` data generated via an
|
||||
// associated `Tracer` (see `NewStandardTracer`) instance. It also names
|
||||
// the containing process and provides access to a straightforward tag map.
|
||||
type SpanRecorder interface {
|
||||
// Implementations must determine whether and where to store `span`.
|
||||
RecordSpan(span *spanS)
|
||||
}
|
||||
|
||||
// Recorder accepts spans, processes and queues them
|
||||
// for delivery to the backend.
|
||||
type Recorder struct {
|
||||
sync.RWMutex
|
||||
spans []jsonSpan
|
||||
testMode bool
|
||||
}
|
||||
|
||||
// NewRecorder Establish a Recorder span recorder
|
||||
func NewRecorder() *Recorder {
|
||||
r := new(Recorder)
|
||||
r.init()
|
||||
return r
|
||||
}
|
||||
|
||||
// NewTestRecorder Establish a new span recorder used for testing
|
||||
func NewTestRecorder() *Recorder {
|
||||
r := new(Recorder)
|
||||
r.testMode = true
|
||||
r.init()
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *Recorder) init() {
|
||||
r.clearQueuedSpans()
|
||||
|
||||
if r.testMode {
|
||||
return
|
||||
}
|
||||
|
||||
ticker := time.NewTicker(1 * time.Second)
|
||||
go func() {
|
||||
for range ticker.C {
|
||||
if sensor.agent.canSend() {
|
||||
r.send()
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// RecordSpan accepts spans to be recorded and added to the span queue
|
||||
// for eventual reporting to the host agent.
|
||||
func (r *Recorder) RecordSpan(span *spanS) {
|
||||
// If we're not announced and not in test mode then just
|
||||
// return
|
||||
if !r.testMode && !sensor.agent.canSend() {
|
||||
return
|
||||
}
|
||||
|
||||
var data = &jsonData{}
|
||||
kind := span.getSpanKind()
|
||||
|
||||
data.SDK = &jsonSDKData{
|
||||
Name: span.Operation,
|
||||
Type: kind,
|
||||
Custom: &jsonCustomData{Tags: span.Tags, Logs: span.collectLogs()}}
|
||||
|
||||
baggage := make(map[string]string)
|
||||
span.context.ForeachBaggageItem(func(k string, v string) bool {
|
||||
baggage[k] = v
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
if len(baggage) > 0 {
|
||||
data.SDK.Custom.Baggage = baggage
|
||||
}
|
||||
|
||||
data.Service = sensor.serviceName
|
||||
|
||||
var parentID *int64
|
||||
if span.ParentSpanID == 0 {
|
||||
parentID = nil
|
||||
} else {
|
||||
parentID = &span.ParentSpanID
|
||||
}
|
||||
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
if len(r.spans) == sensor.options.MaxBufferedSpans {
|
||||
r.spans = r.spans[1:]
|
||||
}
|
||||
|
||||
r.spans = append(r.spans, jsonSpan{
|
||||
TraceID: span.context.TraceID,
|
||||
ParentID: parentID,
|
||||
SpanID: span.context.SpanID,
|
||||
Timestamp: uint64(span.Start.UnixNano()) / uint64(time.Millisecond),
|
||||
Duration: uint64(span.Duration) / uint64(time.Millisecond),
|
||||
Name: "sdk",
|
||||
Error: span.Error,
|
||||
Ec: span.Ec,
|
||||
Lang: "go",
|
||||
From: sensor.agent.from,
|
||||
Data: data})
|
||||
|
||||
if r.testMode || !sensor.agent.canSend() {
|
||||
return
|
||||
}
|
||||
|
||||
if len(r.spans) >= sensor.options.ForceTransmissionStartingAt {
|
||||
log.debug("Forcing spans to agent. Count:", len(r.spans))
|
||||
go r.send()
|
||||
}
|
||||
}
|
||||
|
||||
// QueuedSpansCount returns the number of queued spans
|
||||
// Used only in tests currently.
|
||||
func (r *Recorder) QueuedSpansCount() int {
|
||||
r.RLock()
|
||||
defer r.RUnlock()
|
||||
return len(r.spans)
|
||||
}
|
||||
|
||||
// GetQueuedSpans returns a copy of the queued spans and clears the queue.
|
||||
func (r *Recorder) GetQueuedSpans() []jsonSpan {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
// Copy queued spans
|
||||
queuedSpans := make([]jsonSpan, len(r.spans))
|
||||
copy(queuedSpans, r.spans)
|
||||
|
||||
// and clear out the source
|
||||
r.clearQueuedSpans()
|
||||
return queuedSpans
|
||||
}
|
||||
|
||||
// clearQueuedSpans brings the span queue to empty/0/nada
|
||||
// This function doesn't take the Lock so make sure to have
|
||||
// the write lock before calling.
|
||||
// This is meant to be called from GetQueuedSpans which handles
|
||||
// locking.
|
||||
func (r *Recorder) clearQueuedSpans() {
|
||||
var mbs int
|
||||
|
||||
if len(r.spans) > 0 {
|
||||
if sensor != nil {
|
||||
mbs = sensor.options.MaxBufferedSpans
|
||||
} else {
|
||||
mbs = DefaultMaxBufferedSpans
|
||||
}
|
||||
r.spans = make([]jsonSpan, 0, mbs)
|
||||
}
|
||||
}
|
||||
|
||||
// Retrieve the queued spans and post them to the host agent asynchronously.
|
||||
func (r *Recorder) send() {
|
||||
spansToSend := r.GetQueuedSpans()
|
||||
if len(spansToSend) > 0 {
|
||||
go func() {
|
||||
_, err := sensor.agent.request(sensor.agent.makeURL(agentTracesURL), "POST", spansToSend)
|
||||
if err != nil {
|
||||
log.debug("Posting traces failed in send(): ", err)
|
||||
sensor.agent.reset()
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
76
vendor/github.com/instana/go-sensor/sensor.go
generated
vendored
Normal file
@ -0,0 +1,76 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
|
||||
const (
|
||||
DefaultMaxBufferedSpans = 1000
|
||||
DefaultForceSpanSendAt = 500
|
||||
)
|
||||
|
||||
type sensorS struct {
|
||||
meter *meterS
|
||||
agent *agentS
|
||||
options *Options
|
||||
serviceName string
|
||||
}
|
||||
|
||||
var sensor *sensorS
|
||||
|
||||
func (r *sensorS) init(options *Options) {
|
||||
//sensor can be initialized explicit or implicit through OpenTracing global init
|
||||
if r.meter == nil {
|
||||
r.setOptions(options)
|
||||
r.configureServiceName()
|
||||
r.agent = r.initAgent()
|
||||
r.meter = r.initMeter()
|
||||
}
|
||||
}
|
||||
|
||||
func (r *sensorS) setOptions(options *Options) {
|
||||
r.options = options
|
||||
if r.options == nil {
|
||||
r.options = &Options{}
|
||||
}
|
||||
|
||||
if r.options.MaxBufferedSpans == 0 {
|
||||
r.options.MaxBufferedSpans = DefaultMaxBufferedSpans
|
||||
}
|
||||
|
||||
if r.options.ForceTransmissionStartingAt == 0 {
|
||||
r.options.ForceTransmissionStartingAt = DefaultForceSpanSendAt
|
||||
}
|
||||
}
|
||||
|
||||
func (r *sensorS) getOptions() *Options {
|
||||
return r.options
|
||||
}
|
||||
|
||||
func (r *sensorS) configureServiceName() {
|
||||
if r.options != nil {
|
||||
r.serviceName = r.options.Service
|
||||
}
|
||||
|
||||
if r.serviceName == "" {
|
||||
r.serviceName = filepath.Base(os.Args[0])
|
||||
}
|
||||
}
|
||||
|
||||
// InitSensor Initializes the sensor (without tracing) to begin collecting
|
||||
// and reporting metrics.
|
||||
func InitSensor(options *Options) {
|
||||
if sensor == nil {
|
||||
sensor = new(sensorS)
|
||||
// If this environment variable is set, then override log level
|
||||
_, ok := os.LookupEnv("INSTANA_DEV")
|
||||
if ok {
|
||||
options.LogLevel = Debug
|
||||
}
|
||||
|
||||
sensor.initLog()
|
||||
sensor.init(options)
|
||||
log.debug("initialized sensor")
|
||||
}
|
||||
}
|
253
vendor/github.com/instana/go-sensor/span.go
generated
vendored
Normal file
@ -0,0 +1,253 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
otlog "github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
type spanS struct {
|
||||
tracer *tracerS
|
||||
sync.Mutex
|
||||
|
||||
context SpanContext
|
||||
ParentSpanID int64
|
||||
Operation string
|
||||
Start time.Time
|
||||
Duration time.Duration
|
||||
Tags ot.Tags
|
||||
Logs []ot.LogRecord
|
||||
Error bool
|
||||
Ec int
|
||||
}
|
||||
|
||||
func (r *spanS) BaggageItem(key string) string {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
|
||||
return r.context.Baggage[key]
|
||||
}
|
||||
|
||||
func (r *spanS) SetBaggageItem(key, val string) ot.Span {
|
||||
if r.trim() {
|
||||
return r
|
||||
}
|
||||
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.context = r.context.WithBaggageItem(key, val)
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *spanS) Context() ot.SpanContext {
|
||||
return r.context
|
||||
}
|
||||
|
||||
func (r *spanS) Finish() {
|
||||
r.FinishWithOptions(ot.FinishOptions{})
|
||||
}
|
||||
|
||||
func (r *spanS) FinishWithOptions(opts ot.FinishOptions) {
|
||||
finishTime := opts.FinishTime
|
||||
if finishTime.IsZero() {
|
||||
finishTime = time.Now()
|
||||
}
|
||||
|
||||
duration := finishTime.Sub(r.Start)
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
for _, lr := range opts.LogRecords {
|
||||
r.appendLog(lr)
|
||||
}
|
||||
|
||||
for _, ld := range opts.BulkLogData {
|
||||
r.appendLog(ld.ToLogRecord())
|
||||
}
|
||||
|
||||
r.Duration = duration
|
||||
r.tracer.options.Recorder.RecordSpan(r)
|
||||
}
|
||||
|
||||
func (r *spanS) appendLog(lr ot.LogRecord) {
|
||||
maxLogs := r.tracer.options.MaxLogsPerSpan
|
||||
if maxLogs == 0 || len(r.Logs) < maxLogs {
|
||||
r.Logs = append(r.Logs, lr)
|
||||
}
|
||||
}
|
||||
|
||||
func (r *spanS) Log(ld ot.LogData) {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
if r.trim() || r.tracer.options.DropAllLogs {
|
||||
return
|
||||
}
|
||||
|
||||
if ld.Timestamp.IsZero() {
|
||||
ld.Timestamp = time.Now()
|
||||
}
|
||||
|
||||
r.appendLog(ld.ToLogRecord())
|
||||
}
|
||||
|
||||
func (r *spanS) trim() bool {
|
||||
return !r.context.Sampled && r.tracer.options.TrimUnsampledSpans
|
||||
}
|
||||
|
||||
func (r *spanS) LogEvent(event string) {
|
||||
r.Log(ot.LogData{
|
||||
Event: event})
|
||||
}
|
||||
|
||||
func (r *spanS) LogEventWithPayload(event string, payload interface{}) {
|
||||
r.Log(ot.LogData{
|
||||
Event: event,
|
||||
Payload: payload})
|
||||
}
|
||||
|
||||
func (r *spanS) LogFields(fields ...otlog.Field) {
|
||||
|
||||
for _, v := range fields {
|
||||
// If this tag indicates an error, increase the error count
|
||||
if v.Key() == "error" {
|
||||
r.Error = true
|
||||
r.Ec++
|
||||
}
|
||||
}
|
||||
|
||||
lr := ot.LogRecord{
|
||||
Fields: fields,
|
||||
}
|
||||
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
if r.trim() || r.tracer.options.DropAllLogs {
|
||||
return
|
||||
}
|
||||
|
||||
if lr.Timestamp.IsZero() {
|
||||
lr.Timestamp = time.Now()
|
||||
}
|
||||
|
||||
r.appendLog(lr)
|
||||
}
|
||||
|
||||
func (r *spanS) LogKV(keyValues ...interface{}) {
|
||||
fields, err := otlog.InterleavedKVToFields(keyValues...)
|
||||
if err != nil {
|
||||
r.LogFields(otlog.Error(err), otlog.String("function", "LogKV"))
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
r.LogFields(fields...)
|
||||
}
|
||||
|
||||
func (r *spanS) SetOperationName(operationName string) ot.Span {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
r.Operation = operationName
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *spanS) SetTag(key string, value interface{}) ot.Span {
|
||||
r.Lock()
|
||||
defer r.Unlock()
|
||||
if r.trim() {
|
||||
return r
|
||||
}
|
||||
|
||||
if r.Tags == nil {
|
||||
r.Tags = ot.Tags{}
|
||||
}
|
||||
|
||||
// If this tag indicates an error, increase the error count
|
||||
if key == "error" {
|
||||
r.Error = true
|
||||
r.Ec++
|
||||
}
|
||||
|
||||
r.Tags[key] = value
|
||||
|
||||
return r
|
||||
}
|
||||
|
||||
func (r *spanS) Tracer() ot.Tracer {
|
||||
return r.tracer
|
||||
}
|
||||
|
||||
func (r *spanS) getTag(tag string) interface{} {
|
||||
var x, ok = r.Tags[tag]
|
||||
if !ok {
|
||||
x = ""
|
||||
}
|
||||
return x
|
||||
}
|
||||
|
||||
func (r *spanS) getIntTag(tag string) int {
|
||||
d := r.Tags[tag]
|
||||
if d == nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
x, ok := d.(int)
|
||||
if !ok {
|
||||
return -1
|
||||
}
|
||||
|
||||
return x
|
||||
}
|
||||
|
||||
func (r *spanS) getStringTag(tag string) string {
|
||||
d := r.Tags[tag]
|
||||
if d == nil {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprint(d)
|
||||
}
|
||||
|
||||
func (r *spanS) getHostName() string {
|
||||
hostTag := r.getStringTag(string(ext.PeerHostname))
|
||||
if hostTag != "" {
|
||||
return hostTag
|
||||
}
|
||||
|
||||
h, err := os.Hostname()
|
||||
if err != nil {
|
||||
h = "localhost"
|
||||
}
|
||||
return h
|
||||
}
|
||||
|
||||
func (r *spanS) getSpanKind() string {
|
||||
kind := r.getStringTag(string(ext.SpanKind))
|
||||
|
||||
switch kind {
|
||||
case string(ext.SpanKindRPCServerEnum), "consumer", "entry":
|
||||
return "entry"
|
||||
case string(ext.SpanKindRPCClientEnum), "producer", "exit":
|
||||
return "exit"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func (r *spanS) collectLogs() map[uint64]map[string]interface{} {
|
||||
logs := make(map[uint64]map[string]interface{})
|
||||
for _, l := range r.Logs {
|
||||
if _, ok := logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)]; !ok {
|
||||
logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)] = make(map[string]interface{})
|
||||
}
|
||||
|
||||
for _, f := range l.Fields {
|
||||
logs[uint64(l.Timestamp.UnixNano())/uint64(time.Millisecond)][f.Key()] = f.Value()
|
||||
}
|
||||
}
|
||||
|
||||
return logs
|
||||
}
|
119
vendor/github.com/instana/go-sensor/tracer.go
generated
vendored
Normal file
@ -0,0 +1,119 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
ot "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
const (
|
||||
// MaxLogsPerSpan The maximum number of logs allowed on a span.
|
||||
MaxLogsPerSpan = 2
|
||||
)
|
||||
|
||||
type tracerS struct {
|
||||
options TracerOptions
|
||||
textPropagator *textMapPropagator
|
||||
}
|
||||
|
||||
func (r *tracerS) Inject(sc ot.SpanContext, format interface{}, carrier interface{}) error {
|
||||
switch format {
|
||||
case ot.TextMap, ot.HTTPHeaders:
|
||||
return r.textPropagator.inject(sc, carrier)
|
||||
}
|
||||
|
||||
return ot.ErrUnsupportedFormat
|
||||
}
|
||||
|
||||
func (r *tracerS) Extract(format interface{}, carrier interface{}) (ot.SpanContext, error) {
|
||||
switch format {
|
||||
case ot.TextMap, ot.HTTPHeaders:
|
||||
return r.textPropagator.extract(carrier)
|
||||
}
|
||||
|
||||
return nil, ot.ErrUnsupportedFormat
|
||||
}
|
||||
|
||||
func (r *tracerS) StartSpan(operationName string, opts ...ot.StartSpanOption) ot.Span {
|
||||
sso := ot.StartSpanOptions{}
|
||||
for _, o := range opts {
|
||||
o.Apply(&sso)
|
||||
}
|
||||
|
||||
return r.StartSpanWithOptions(operationName, sso)
|
||||
}
|
||||
|
||||
func (r *tracerS) StartSpanWithOptions(operationName string, opts ot.StartSpanOptions) ot.Span {
|
||||
startTime := opts.StartTime
|
||||
if startTime.IsZero() {
|
||||
startTime = time.Now()
|
||||
}
|
||||
|
||||
tags := opts.Tags
|
||||
span := &spanS{}
|
||||
Loop:
|
||||
for _, ref := range opts.References {
|
||||
switch ref.Type {
|
||||
case ot.ChildOfRef, ot.FollowsFromRef:
|
||||
refCtx := ref.ReferencedContext.(SpanContext)
|
||||
span.context.TraceID = refCtx.TraceID
|
||||
span.context.SpanID = randomID()
|
||||
span.context.Sampled = refCtx.Sampled
|
||||
span.ParentSpanID = refCtx.SpanID
|
||||
if l := len(refCtx.Baggage); l > 0 {
|
||||
span.context.Baggage = make(map[string]string, l)
|
||||
for k, v := range refCtx.Baggage {
|
||||
span.context.Baggage[k] = v
|
||||
}
|
||||
}
|
||||
|
||||
break Loop
|
||||
}
|
||||
}
|
||||
|
||||
if span.context.TraceID == 0 {
|
||||
span.context.SpanID = randomID()
|
||||
span.context.TraceID = span.context.SpanID
|
||||
span.context.Sampled = r.options.ShouldSample(span.context.TraceID)
|
||||
}
|
||||
|
||||
return r.startSpanInternal(span, operationName, startTime, tags)
|
||||
}
|
||||
|
||||
func (r *tracerS) startSpanInternal(span *spanS, operationName string, startTime time.Time, tags ot.Tags) ot.Span {
|
||||
span.tracer = r
|
||||
span.Operation = operationName
|
||||
span.Start = startTime
|
||||
span.Duration = -1
|
||||
span.Tags = tags
|
||||
|
||||
return span
|
||||
}
|
||||
|
||||
func shouldSample(traceID int64) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// NewTracer Get a new Tracer with the default options applied.
|
||||
func NewTracer() ot.Tracer {
|
||||
return NewTracerWithOptions(&Options{})
|
||||
}
|
||||
|
||||
// NewTracerWithOptions Get a new Tracer with the specified options.
|
||||
func NewTracerWithOptions(options *Options) ot.Tracer {
|
||||
InitSensor(options)
|
||||
|
||||
return NewTracerWithEverything(options, NewRecorder())
|
||||
}
|
||||
|
||||
// NewTracerWithEverything Get a new Tracer with the works.
|
||||
func NewTracerWithEverything(options *Options, recorder SpanRecorder) ot.Tracer {
|
||||
InitSensor(options)
|
||||
ret := &tracerS{options: TracerOptions{
|
||||
Recorder: recorder,
|
||||
ShouldSample: shouldSample,
|
||||
MaxLogsPerSpan: MaxLogsPerSpan}}
|
||||
ret.textPropagator = &textMapPropagator{ret}
|
||||
|
||||
return ret
|
||||
}
|
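For orientation, here is a minimal sketch of driving the tracer above through the standard OpenTracing interfaces it implements. The operation name and tag are illustrative placeholders, not values taken from this change.

package main

import (
	instana "github.com/instana/go-sensor"
	ot "github.com/opentracing/opentracing-go"
)

func main() {
	// NewTracer (tracer.go above) initializes the sensor with default options
	// and returns an opentracing-compatible tracer.
	tracer := instana.NewTracer()
	ot.SetGlobalTracer(tracer)

	// With no references, StartSpanWithOptions assigns a fresh TraceID/SpanID
	// and asks the configured ShouldSample function for a sampling decision.
	span := tracer.StartSpan("http-request") // placeholder operation name
	span.SetTag("span.kind", "entry")        // placeholder tag
	span.Finish()
}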
91
vendor/github.com/instana/go-sensor/tracer_options.go
generated
vendored
Normal file
91
vendor/github.com/instana/go-sensor/tracer_options.go
generated
vendored
Normal file
@ -0,0 +1,91 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
bt "github.com/opentracing/basictracer-go"
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
// Tracer extends the opentracing.Tracer interface
|
||||
type Tracer interface {
|
||||
opentracing.Tracer
|
||||
|
||||
// Options gets the Options used in New() or NewWithOptions().
|
||||
Options() TracerOptions
|
||||
}
|
||||
|
||||
// TracerOptions allows creating a customized Tracer via NewWithOptions. The object
|
||||
// must not be updated when there is an active tracer using it.
|
||||
type TracerOptions struct {
|
||||
// ShouldSample is a function which is called when creating a new Span and
|
||||
// determines whether that Span is sampled. The randomized TraceID is supplied
|
||||
// to allow deterministic sampling decisions to be made across different nodes.
|
||||
// For example,
|
||||
//
|
||||
// func(traceID int64) bool { return traceID % 64 == 0 }
|
||||
//
|
||||
// samples every 64th trace on average.
|
||||
ShouldSample func(traceID int64) bool
|
||||
// TrimUnsampledSpans turns potentially expensive operations on unsampled
|
||||
// Spans into no-ops. More precisely, tags and log events are silently
|
||||
// discarded. If NewSpanEventListener is set, the callbacks will still fire.
|
||||
TrimUnsampledSpans bool
|
||||
// Recorder receives Spans which have been finished.
|
||||
Recorder SpanRecorder
|
||||
// NewSpanEventListener can be used to enhance the tracer by effectively
|
||||
// attaching external code to trace events. See NetTraceIntegrator for a
|
||||
// practical example, and event.go for the list of possible events.
|
||||
NewSpanEventListener func() func(bt.SpanEvent)
|
||||
// DropAllLogs turns log events on all Spans into no-ops.
|
||||
// If NewSpanEventListener is set, the callbacks will still fire.
|
||||
DropAllLogs bool
|
||||
// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
|
||||
// value). If a span has more logs than this value, logs are dropped as
|
||||
// necessary (and replaced with a log describing how many were dropped).
|
||||
//
|
||||
// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
|
||||
// half are the newest logs.
|
||||
//
|
||||
// If NewSpanEventListener is set, the callbacks will still fire for all log
|
||||
// events. This value is ignored if DropAllLogs is true.
|
||||
MaxLogsPerSpan int
|
||||
// DebugAssertSingleGoroutine internally records the ID of the goroutine
|
||||
// creating each Span and verifies that no operation is carried out on
|
||||
// it on a different goroutine.
|
||||
// Provided strictly for development purposes.
|
||||
// Passing Spans between goroutine without proper synchronization often
|
||||
// results in use-after-Finish() errors. For a simple example, consider the
|
||||
// following pseudocode:
|
||||
//
|
||||
// func (s *Server) Handle(req http.Request) error {
|
||||
// sp := s.StartSpan("server")
|
||||
// defer sp.Finish()
|
||||
// wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req)
|
||||
// select {
|
||||
// case resp := <-wait:
|
||||
// return resp.Error
|
||||
// case <-time.After(10*time.Second):
|
||||
// sp.LogEvent("timed out waiting for processing")
|
||||
// return ErrTimedOut
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// This looks reasonable at first, but a request which spends more than ten
|
||||
// seconds in the queue is abandoned by the main goroutine and its trace
|
||||
// finished, leading to use-after-finish when the request is finally
|
||||
// processed. Note also that even joining on to a finished Span via
|
||||
// StartSpanWithOptions constitutes an illegal operation.
|
||||
//
|
||||
// Code bases which do not require (or decide they do not want) Spans to
|
||||
// be passed across goroutine boundaries can run with this flag enabled in
|
||||
// tests to increase their chances of spotting wrong-doers.
|
||||
DebugAssertSingleGoroutine bool
|
||||
// DebugAssertUseAfterFinish is provided strictly for development purposes.
|
||||
// When set, it attempts to exacerbate issues emanating from use of Spans
|
||||
// after calling Finish by running additional assertions.
|
||||
DebugAssertUseAfterFinish bool
|
||||
// EnableSpanPool enables the use of a pool, so that the tracer reuses spans
|
||||
// after Finish has been called on it. Adds a slight performance gain as it
|
||||
// reduces allocations. However, if you have any use-after-finish race
|
||||
// conditions the code may panic.
|
||||
EnableSpanPool bool
|
||||
}
|
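Assuming the same package API shown in tracer.go above, a tracer can also be wired with an explicit recorder. Options carries further fields defined elsewhere in the package (not in this hunk), so the zero value is used here purely for illustration.

package main

import instana "github.com/instana/go-sensor"

func main() {
	// NewTracerWithEverything (tracer.go above) accepts a caller-supplied
	// SpanRecorder; NewRecorder() is the default used by NewTracerWithOptions.
	recorder := instana.NewRecorder()
	tracer := instana.NewTracerWithEverything(&instana.Options{}, recorder)
	_ = tracer // usable anywhere an opentracing.Tracer is expected
}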
99
vendor/github.com/instana/go-sensor/util.go
generated
vendored
Normal file
99
vendor/github.com/instana/go-sensor/util.go
generated
vendored
Normal file
@ -0,0 +1,99 @@
|
||||
package instana
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"math/rand"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
var (
|
||||
seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
|
||||
seededIDLock sync.Mutex
|
||||
)
|
||||
|
||||
func randomID() int64 {
|
||||
seededIDLock.Lock()
|
||||
defer seededIDLock.Unlock()
|
||||
return int64(seededIDGen.Int63())
|
||||
}
|
||||
|
||||
// ID2Header converts an Instana ID to a value that can be used in
|
||||
// context propagation (such as HTTP headers). More specifically,
|
||||
// this converts a signed 64 bit integer into an unsigned hex string.
|
||||
func ID2Header(id int64) (string, error) {
|
||||
// FIXME: We're assuming LittleEndian here
|
||||
|
||||
// Write out _signed_ 64bit integer to byte buffer
|
||||
buf := new(bytes.Buffer)
|
||||
if err := binary.Write(buf, binary.LittleEndian, id); err == nil {
|
||||
// Read bytes back into _unsigned_ 64 bit integer
|
||||
var unsigned uint64
|
||||
if err = binary.Read(buf, binary.LittleEndian, &unsigned); err == nil {
|
||||
// Convert uint64 to hex string equivalent and return that
|
||||
return strconv.FormatUint(unsigned, 16), nil
|
||||
}
|
||||
log.debug(err)
|
||||
} else {
|
||||
log.debug(err)
|
||||
}
|
||||
return "", errors.New("context corrupted; could not convert value")
|
||||
}
|
||||
|
||||
// Header2ID converts a header context value into an Instana ID. More
|
||||
// specifically, this converts an unsigned 64 bit hex value into a signed
|
||||
// 64bit integer.
|
||||
func Header2ID(header string) (int64, error) {
|
||||
// FIXME: We're assuming LittleEndian here
|
||||
|
||||
// Parse unsigned 64 bit hex string into unsigned 64 bit base 10 integer
|
||||
if unsignedID, err := strconv.ParseUint(header, 16, 64); err == nil {
|
||||
// Write out _unsigned_ 64bit integer to byte buffer
|
||||
buf := new(bytes.Buffer)
|
||||
if err = binary.Write(buf, binary.LittleEndian, unsignedID); err == nil {
|
||||
// Read bytes back into _signed_ 64 bit integer
|
||||
var signedID int64
|
||||
if err = binary.Read(buf, binary.LittleEndian, &signedID); err == nil {
|
||||
// The success case
|
||||
return signedID, nil
|
||||
}
|
||||
log.debug(err)
|
||||
} else {
|
||||
log.debug(err)
|
||||
}
|
||||
} else {
|
||||
log.debug(err)
|
||||
}
|
||||
return int64(0), errors.New("context corrupted; could not convert value")
|
||||
}
|
||||
|
||||
func getCommandLine() (string, []string) {
|
||||
var cmdlinePath string = "/proc/" + strconv.Itoa(os.Getpid()) + "/cmdline"
|
||||
|
||||
cmdline, err := ioutil.ReadFile(cmdlinePath)
|
||||
|
||||
if err != nil {
|
||||
log.debug("No /proc. Returning OS reported cmdline")
|
||||
return os.Args[0], os.Args[1:]
|
||||
}
|
||||
|
||||
parts := strings.FieldsFunc(string(cmdline), func(c rune) bool {
|
||||
if c == '\u0000' {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
})
|
||||
log.debug("cmdline says:", parts[0], parts[1:])
|
||||
return parts[0], parts[1:]
|
||||
}
|
||||
|
||||
func abs(x int64) int64 {
|
||||
y := x >> 63
|
||||
return (x + y) ^ y
|
||||
}
|
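A small round-trip sketch of the two ID helpers above: ID2Header renders a signed 64-bit ID as an unsigned hex string, and Header2ID parses that string back into the signed value. The sample ID is arbitrary.

package main

import (
	"fmt"

	instana "github.com/instana/go-sensor"
)

func main() {
	id := int64(-6397081294873591000) // arbitrary example value

	header, err := instana.ID2Header(id)
	if err != nil {
		panic(err)
	}
	back, err := instana.Header2ID(header)
	if err != nil {
		panic(err)
	}
	fmt.Println(header, back == id) // prints the hex form and "true"
}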
191
vendor/github.com/looplab/fsm/LICENSE
generated
vendored
Normal file
191
vendor/github.com/looplab/fsm/LICENSE
generated
vendored
Normal file
@ -0,0 +1,191 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction, and
|
||||
distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by the copyright
|
||||
owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all other entities
|
||||
that control, are controlled by, or are under common control with that entity.
|
||||
For the purposes of this definition, "control" means (i) the power, direct or
|
||||
indirect, to cause the direction or management of such entity, whether by
|
||||
contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity exercising
|
||||
permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications, including
|
||||
but not limited to software source code, documentation source, and configuration
|
||||
files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical transformation or
|
||||
translation of a Source form, including but not limited to compiled object code,
|
||||
generated documentation, and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or Object form, made
|
||||
available under the License, as indicated by a copyright notice that is included
|
||||
in or attached to the work (an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object form, that
|
||||
is based on (or derived from) the Work and for which the editorial revisions,
|
||||
annotations, elaborations, or other modifications represent, as a whole, an
|
||||
original work of authorship. For the purposes of this License, Derivative Works
|
||||
shall not include works that remain separable from, or merely link (or bind by
|
||||
name) to the interfaces of, the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including the original version
|
||||
of the Work and any modifications or additions to that Work or Derivative Works
|
||||
thereof, that is intentionally submitted to Licensor for inclusion in the Work
|
||||
by the copyright owner or by an individual or Legal Entity authorized to submit
|
||||
on behalf of the copyright owner. For the purposes of this definition,
|
||||
"submitted" means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems, and
|
||||
issue tracking systems that are managed by, or on behalf of, the Licensor for
|
||||
the purpose of discussing and improving the Work, but excluding communication
|
||||
that is conspicuously marked or otherwise designated in writing by the copyright
|
||||
owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity on behalf
|
||||
of whom a Contribution has been received by Licensor and subsequently
|
||||
incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the Work and such
|
||||
Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License.
|
||||
|
||||
Subject to the terms and conditions of this License, each Contributor hereby
|
||||
grants to You a perpetual, worldwide, non-exclusive, no-charge, royalty-free,
|
||||
irrevocable (except as stated in this section) patent license to make, have
|
||||
made, use, offer to sell, sell, import, and otherwise transfer the Work, where
|
||||
such license applies only to those patent claims licensable by such Contributor
|
||||
that are necessarily infringed by their Contribution(s) alone or by combination
|
||||
of their Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If You institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work or a
|
||||
Contribution incorporated within the Work constitutes direct or contributory
|
||||
patent infringement, then any patent licenses granted to You under this License
|
||||
for that Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution.
|
||||
|
||||
You may reproduce and distribute copies of the Work or Derivative Works thereof
|
||||
in any medium, with or without modifications, and in Source or Object form,
|
||||
provided that You meet the following conditions:
|
||||
|
||||
You must give any other recipients of the Work or Derivative Works a copy of
|
||||
this License; and
|
||||
You must cause any modified files to carry prominent notices stating that You
|
||||
changed the files; and
|
||||
You must retain, in the Source form of any Derivative Works that You distribute,
|
||||
all copyright, patent, trademark, and attribution notices from the Source form
|
||||
of the Work, excluding those notices that do not pertain to any part of the
|
||||
Derivative Works; and
|
||||
If the Work includes a "NOTICE" text file as part of its distribution, then any
|
||||
Derivative Works that You distribute must include a readable copy of the
|
||||
attribution notices contained within such NOTICE file, excluding those notices
|
||||
that do not pertain to any part of the Derivative Works, in at least one of the
|
||||
following places: within a NOTICE text file distributed as part of the
|
||||
Derivative Works; within the Source form or documentation, if provided along
|
||||
with the Derivative Works; or, within a display generated by the Derivative
|
||||
Works, if and wherever such third-party notices normally appear. The contents of
|
||||
the NOTICE file are for informational purposes only and do not modify the
|
||||
License. You may add Your own attribution notices within Derivative Works that
|
||||
You distribute, alongside or as an addendum to the NOTICE text from the Work,
|
||||
provided that such additional attribution notices cannot be construed as
|
||||
modifying the License.
|
||||
You may add Your own copyright statement to Your modifications and may provide
|
||||
additional or different license terms and conditions for use, reproduction, or
|
||||
distribution of Your modifications, or for any such Derivative Works as a whole,
|
||||
provided Your use, reproduction, and distribution of the Work otherwise complies
|
||||
with the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions.
|
||||
|
||||
Unless You explicitly state otherwise, any Contribution intentionally submitted
|
||||
for inclusion in the Work by You to the Licensor shall be under the terms and
|
||||
conditions of this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify the terms of
|
||||
any separate license agreement you may have executed with Licensor regarding
|
||||
such Contributions.
|
||||
|
||||
6. Trademarks.
|
||||
|
||||
This License does not grant permission to use the trade names, trademarks,
|
||||
service marks, or product names of the Licensor, except as required for
|
||||
reasonable and customary use in describing the origin of the Work and
|
||||
reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty.
|
||||
|
||||
Unless required by applicable law or agreed to in writing, Licensor provides the
|
||||
Work (and each Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE,
|
||||
NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE. You are
|
||||
solely responsible for determining the appropriateness of using or
|
||||
redistributing the Work and assume any risks associated with Your exercise of
|
||||
permissions under this License.
|
||||
|
||||
8. Limitation of Liability.
|
||||
|
||||
In no event and under no legal theory, whether in tort (including negligence),
|
||||
contract, or otherwise, unless required by applicable law (such as deliberate
|
||||
and grossly negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special, incidental,
|
||||
or consequential damages of any character arising as a result of this License or
|
||||
out of the use or inability to use the Work (including but not limited to
|
||||
damages for loss of goodwill, work stoppage, computer failure or malfunction, or
|
||||
any and all other commercial damages or losses), even if such Contributor has
|
||||
been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability.
|
||||
|
||||
While redistributing the Work or Derivative Works thereof, You may choose to
|
||||
offer, and charge a fee for, acceptance of support, warranty, indemnity, or
|
||||
other liability obligations and/or rights consistent with this License. However,
|
||||
in accepting such obligations, You may act only on Your own behalf and on Your
|
||||
sole responsibility, not on behalf of any other Contributor, and only if You
|
||||
agree to indemnify, defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason of your
|
||||
accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work
|
||||
|
||||
To apply the Apache License to your work, attach the following boilerplate
|
||||
notice, with the fields enclosed by brackets "[]" replaced with your own
|
||||
identifying information. (Don't include the brackets!) The text should be
|
||||
enclosed in the appropriate comment syntax for the file format. We also
|
||||
recommend that a file or class name and description of purpose be included on
|
||||
the same "printed page" as the copyright notice for easier identification within
|
||||
third-party archives.
|
||||
|
||||
Copyright [yyyy] [name of copyright owner]
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
100
vendor/github.com/looplab/fsm/errors.go
generated
vendored
Normal file
100
vendor/github.com/looplab/fsm/errors.go
generated
vendored
Normal file
@ -0,0 +1,100 @@
|
||||
// Copyright (c) 2013 - Max Persson <max@looplab.se>
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fsm
|
||||
|
||||
// InvalidEventError is returned by FSM.Event() when the event cannot be called
|
||||
// in the current state.
|
||||
type InvalidEventError struct {
|
||||
Event string
|
||||
State string
|
||||
}
|
||||
|
||||
func (e InvalidEventError) Error() string {
|
||||
return "event " + e.Event + " inappropriate in current state " + e.State
|
||||
}
|
||||
|
||||
// UnknownEventError is returned by FSM.Event() when the event is not defined.
|
||||
type UnknownEventError struct {
|
||||
Event string
|
||||
}
|
||||
|
||||
func (e UnknownEventError) Error() string {
|
||||
return "event " + e.Event + " does not exist"
|
||||
}
|
||||
|
||||
// InTransitionError is returned by FSM.Event() when an asynchronous transition
|
||||
// is already in progress.
|
||||
type InTransitionError struct {
|
||||
Event string
|
||||
}
|
||||
|
||||
func (e InTransitionError) Error() string {
|
||||
return "event " + e.Event + " inappropriate because previous transition did not complete"
|
||||
}
|
||||
|
||||
// NotInTransitionError is returned by FSM.Transition() when an asynchronous
|
||||
// transition is not in progress.
|
||||
type NotInTransitionError struct{}
|
||||
|
||||
func (e NotInTransitionError) Error() string {
|
||||
return "transition inappropriate because no state change in progress"
|
||||
}
|
||||
|
||||
// NoTransitionError is returned by FSM.Event() when no transition has happened,
|
||||
// for example if the source and destination states are the same.
|
||||
type NoTransitionError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e NoTransitionError) Error() string {
|
||||
if e.Err != nil {
|
||||
return "no transition with error: " + e.Err.Error()
|
||||
}
|
||||
return "no transition"
|
||||
}
|
||||
|
||||
// CanceledError is returned by FSM.Event() when a callback has canceled a
|
||||
// transition.
|
||||
type CanceledError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e CanceledError) Error() string {
|
||||
if e.Err != nil {
|
||||
return "transition canceled with error: " + e.Err.Error()
|
||||
}
|
||||
return "transition canceled"
|
||||
}
|
||||
|
||||
// AsyncError is returned by FSM.Event() when a callback has initiated an
|
||||
// asynchronous state transition.
|
||||
type AsyncError struct {
|
||||
Err error
|
||||
}
|
||||
|
||||
func (e AsyncError) Error() string {
|
||||
if e.Err != nil {
|
||||
return "async started with error: " + e.Err.Error()
|
||||
}
|
||||
return "async started"
|
||||
}
|
||||
|
||||
// InternalError is returned by FSM.Event() and should never occur. It is
|
||||
// probably because of a bug.
|
||||
type InternalError struct{}
|
||||
|
||||
func (e InternalError) Error() string {
|
||||
return "internal error on state transition"
|
||||
}
|
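A sketch of telling these error types apart when driving a state machine. FSM and its Event method are defined in fsm.go, added later in this change; the function and event names here are placeholders.

package example

import "github.com/looplab/fsm"

// handleEvent fires an event and reacts to the typed errors defined above.
func handleEvent(machine *fsm.FSM, event string) {
	switch err := machine.Event(event); err.(type) {
	case nil:
		// the transition completed
	case fsm.NoTransitionError:
		// source and destination states were the same
	case fsm.InvalidEventError, fsm.UnknownEventError:
		// the event is not allowed in the current state, or is not defined at all
	case fsm.CanceledError:
		// a before_ or leave_ callback canceled the transition
	case fsm.AsyncError:
		// a leave_<STATE> callback called Async; complete it explicitly
		_ = machine.Transition()
	default:
		// InTransitionError, InternalError, ...
		_ = err
	}
}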
62
vendor/github.com/looplab/fsm/event.go
generated
vendored
Normal file
62
vendor/github.com/looplab/fsm/event.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
// Copyright (c) 2013 - Max Persson <max@looplab.se>
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package fsm
|
||||
|
||||
// Event is the info that gets passed as a reference in the callbacks.
|
||||
type Event struct {
|
||||
// FSM is a reference to the current FSM.
|
||||
FSM *FSM
|
||||
|
||||
// Event is the event name.
|
||||
Event string
|
||||
|
||||
// Src is the state before the transition.
|
||||
Src string
|
||||
|
||||
// Dst is the state after the transition.
|
||||
Dst string
|
||||
|
||||
// Err is an optional error that can be returned from a callback.
|
||||
Err error
|
||||
|
||||
// Args is an optional list of arguments passed to the callback.
|
||||
Args []interface{}
|
||||
|
||||
// canceled is an internal flag set if the transition is canceled.
|
||||
canceled bool
|
||||
|
||||
// async is an internal flag set if the transition should be asynchronous
|
||||
async bool
|
||||
}
|
||||
|
||||
// Cancel can be called in before_<EVENT> or leave_<STATE> to cancel the
|
||||
// current transition before it happens. It takes an optional error, which will
|
||||
// overwrite e.Err if set before.
|
||||
func (e *Event) Cancel(err ...error) {
|
||||
e.canceled = true
|
||||
|
||||
if len(err) > 0 {
|
||||
e.Err = err[0]
|
||||
}
|
||||
}
|
||||
|
||||
// Async can be called in leave_<STATE> to do an asynchronous state transition.
|
||||
//
|
||||
// The current state transition will be on hold in the old state until a final
|
||||
// call to Transition is made. This will complete the transition and possibly
|
||||
// call the other callbacks.
|
||||
func (e *Event) Async() {
|
||||
e.async = true
|
||||
}
|
447
vendor/github.com/looplab/fsm/fsm.go
generated
vendored
Normal file
447
vendor/github.com/looplab/fsm/fsm.go
generated
vendored
Normal file
@ -0,0 +1,447 @@
|
||||
// Copyright (c) 2013 - Max Persson <max@looplab.se>
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Package fsm implements a finite state machine.
|
||||
//
|
||||
// It is heavily based on two FSM implementations:
|
||||
//
|
||||
// Javascript Finite State Machine
|
||||
// https://github.com/jakesgordon/javascript-state-machine
|
||||
//
|
||||
// Fysom for Python
|
||||
// https://github.com/oxplot/fysom (forked at https://github.com/mriehl/fysom)
|
||||
//
|
||||
package fsm
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"sync"
|
||||
)
|
||||
|
||||
// transitioner is an interface for the FSM's transition function.
|
||||
type transitioner interface {
|
||||
transition(*FSM) error
|
||||
}
|
||||
|
||||
// FSM is the state machine that holds the current state.
|
||||
//
|
||||
// It has to be created with NewFSM to function properly.
|
||||
type FSM struct {
|
||||
// current is the state that the FSM is currently in.
|
||||
current string
|
||||
|
||||
// transitions maps events and source states to destination states.
|
||||
transitions map[eKey]string
|
||||
|
||||
// callbacks maps events and targets to callback functions.
|
||||
callbacks map[cKey]Callback
|
||||
|
||||
// transition is the internal transition function used either directly
|
||||
// or when Transition is called in an asynchronous state transition.
|
||||
transition func()
|
||||
// transitionerObj calls the FSM's transition() function.
|
||||
transitionerObj transitioner
|
||||
|
||||
// stateMu guards access to the current state.
|
||||
stateMu sync.RWMutex
|
||||
// eventMu guards access to Event() and Transition().
|
||||
eventMu sync.Mutex
|
||||
}
|
||||
|
||||
// EventDesc represents an event when initializing the FSM.
|
||||
//
|
||||
// The event can have one or more source states that are valid for performing
|
||||
// the transition. If the FSM is in one of the source states it will end up in
|
||||
// the specified destination state, calling all defined callbacks as it goes.
|
||||
type EventDesc struct {
|
||||
// Name is the event name used when calling for a transition.
|
||||
Name string
|
||||
|
||||
// Src is a slice of source states that the FSM must be in to perform a
|
||||
// state transition.
|
||||
Src []string
|
||||
|
||||
// Dst is the destination state that the FSM will be in if the transition
|
||||
// succeeds.
|
||||
Dst string
|
||||
}
|
||||
|
||||
// Callback is a function type that callbacks should use. Event is the current
|
||||
// event info as the callback happens.
|
||||
type Callback func(*Event)
|
||||
|
||||
// Events is a shorthand for defining the transition map in NewFSM.
|
||||
type Events []EventDesc
|
||||
|
||||
// Callbacks is a shorthand for defining the callbacks in NewFSM.
|
||||
type Callbacks map[string]Callback
|
||||
|
||||
// NewFSM constructs a FSM from events and callbacks.
|
||||
//
|
||||
// The events and transitions are specified as a slice of Event structs
|
||||
// specified as Events. Each Event is mapped to one or more internal
|
||||
// transitions from Event.Src to Event.Dst.
|
||||
//
|
||||
// Callbacks are added as a map specified as Callbacks where the key is parsed
|
||||
// as the callback event as follows, and called in the same order:
|
||||
//
|
||||
// 1. before_<EVENT> - called before event named <EVENT>
|
||||
//
|
||||
// 2. before_event - called before all events
|
||||
//
|
||||
// 3. leave_<OLD_STATE> - called before leaving <OLD_STATE>
|
||||
//
|
||||
// 4. leave_state - called before leaving all states
|
||||
//
|
||||
// 5. enter_<NEW_STATE> - called after entering <NEW_STATE>
|
||||
//
|
||||
// 6. enter_state - called after entering all states
|
||||
//
|
||||
// 7. after_<EVENT> - called after event named <EVENT>
|
||||
//
|
||||
// 8. after_event - called after all events
|
||||
//
|
||||
// There are also two short form versions for the most commonly used callbacks.
|
||||
// They are simply the name of the event or state:
|
||||
//
|
||||
// 1. <NEW_STATE> - called after entering <NEW_STATE>
|
||||
//
|
||||
// 2. <EVENT> - called after event named <EVENT>
|
||||
//
|
||||
// If both a shorthand version and a full version is specified it is undefined
|
||||
// which version of the callback will end up in the internal map. This is due
|
||||
// to the pseudo-random nature of Go maps. No checking for multiple keys is
|
||||
// currently performed.
|
||||
func NewFSM(initial string, events []EventDesc, callbacks map[string]Callback) *FSM {
|
||||
f := &FSM{
|
||||
transitionerObj: &transitionerStruct{},
|
||||
current: initial,
|
||||
transitions: make(map[eKey]string),
|
||||
callbacks: make(map[cKey]Callback),
|
||||
}
|
||||
|
||||
// Build transition map and store sets of all events and states.
|
||||
allEvents := make(map[string]bool)
|
||||
allStates := make(map[string]bool)
|
||||
for _, e := range events {
|
||||
for _, src := range e.Src {
|
||||
f.transitions[eKey{e.Name, src}] = e.Dst
|
||||
allStates[src] = true
|
||||
allStates[e.Dst] = true
|
||||
}
|
||||
allEvents[e.Name] = true
|
||||
}
|
||||
|
||||
// Map all callbacks to events/states.
|
||||
for name, fn := range callbacks {
|
||||
var target string
|
||||
var callbackType int
|
||||
|
||||
switch {
|
||||
case strings.HasPrefix(name, "before_"):
|
||||
target = strings.TrimPrefix(name, "before_")
|
||||
if target == "event" {
|
||||
target = ""
|
||||
callbackType = callbackBeforeEvent
|
||||
} else if _, ok := allEvents[target]; ok {
|
||||
callbackType = callbackBeforeEvent
|
||||
}
|
||||
case strings.HasPrefix(name, "leave_"):
|
||||
target = strings.TrimPrefix(name, "leave_")
|
||||
if target == "state" {
|
||||
target = ""
|
||||
callbackType = callbackLeaveState
|
||||
} else if _, ok := allStates[target]; ok {
|
||||
callbackType = callbackLeaveState
|
||||
}
|
||||
case strings.HasPrefix(name, "enter_"):
|
||||
target = strings.TrimPrefix(name, "enter_")
|
||||
if target == "state" {
|
||||
target = ""
|
||||
callbackType = callbackEnterState
|
||||
} else if _, ok := allStates[target]; ok {
|
||||
callbackType = callbackEnterState
|
||||
}
|
||||
case strings.HasPrefix(name, "after_"):
|
||||
target = strings.TrimPrefix(name, "after_")
|
||||
if target == "event" {
|
||||
target = ""
|
||||
callbackType = callbackAfterEvent
|
||||
} else if _, ok := allEvents[target]; ok {
|
||||
callbackType = callbackAfterEvent
|
||||
}
|
||||
default:
|
||||
target = name
|
||||
if _, ok := allStates[target]; ok {
|
||||
callbackType = callbackEnterState
|
||||
} else if _, ok := allEvents[target]; ok {
|
||||
callbackType = callbackAfterEvent
|
||||
}
|
||||
}
|
||||
|
||||
if callbackType != callbackNone {
|
||||
f.callbacks[cKey{target, callbackType}] = fn
|
||||
}
|
||||
}
|
||||
|
||||
return f
|
||||
}
|
||||
|
||||
// Current returns the current state of the FSM.
|
||||
func (f *FSM) Current() string {
|
||||
f.stateMu.RLock()
|
||||
defer f.stateMu.RUnlock()
|
||||
return f.current
|
||||
}
|
||||
|
||||
// Is returns true if state is the current state.
|
||||
func (f *FSM) Is(state string) bool {
|
||||
f.stateMu.RLock()
|
||||
defer f.stateMu.RUnlock()
|
||||
return state == f.current
|
||||
}
|
||||
|
||||
// SetState allows the user to move to the given state from current state.
|
||||
// The call does not trigger any callbacks, even if they are defined.
|
||||
func (f *FSM) SetState(state string) {
|
||||
f.stateMu.Lock()
|
||||
defer f.stateMu.Unlock()
|
||||
f.current = state
|
||||
return
|
||||
}
|
||||
|
||||
// Can returns true if event can occur in the current state.
|
||||
func (f *FSM) Can(event string) bool {
|
||||
f.stateMu.RLock()
|
||||
defer f.stateMu.RUnlock()
|
||||
_, ok := f.transitions[eKey{event, f.current}]
|
||||
return ok && (f.transition == nil)
|
||||
}
|
||||
|
||||
// AvailableTransitions returns a list of transitions available in the
|
||||
// current state.
|
||||
func (f *FSM) AvailableTransitions() []string {
|
||||
f.stateMu.RLock()
|
||||
defer f.stateMu.RUnlock()
|
||||
var transitions []string
|
||||
for key := range f.transitions {
|
||||
if key.src == f.current {
|
||||
transitions = append(transitions, key.event)
|
||||
}
|
||||
}
|
||||
return transitions
|
||||
}
|
||||
|
||||
// Cannot returns true if event cannot occur in the current state.
|
||||
// It is a convenience method to help code read nicely.
|
||||
func (f *FSM) Cannot(event string) bool {
|
||||
return !f.Can(event)
|
||||
}
|
||||
|
||||
// Event initiates a state transition with the named event.
|
||||
//
|
||||
// The call takes a variable number of arguments that will be passed to the
|
||||
// callback, if defined.
|
||||
//
|
||||
// It will return nil if the state change is ok or one of these errors:
|
||||
//
|
||||
// - event X inappropriate because previous transition did not complete
|
||||
//
|
||||
// - event X inappropriate in current state Y
|
||||
//
|
||||
// - event X does not exist
|
||||
//
|
||||
// - internal error on state transition
|
||||
//
|
||||
// The last error should never occur in this situation and is a sign of an
|
||||
// internal bug.
|
||||
func (f *FSM) Event(event string, args ...interface{}) error {
|
||||
f.eventMu.Lock()
|
||||
defer f.eventMu.Unlock()
|
||||
|
||||
f.stateMu.RLock()
|
||||
defer f.stateMu.RUnlock()
|
||||
|
||||
if f.transition != nil {
|
||||
return InTransitionError{event}
|
||||
}
|
||||
|
||||
dst, ok := f.transitions[eKey{event, f.current}]
|
||||
if !ok {
|
||||
for ekey := range f.transitions {
|
||||
if ekey.event == event {
|
||||
return InvalidEventError{event, f.current}
|
||||
}
|
||||
}
|
||||
return UnknownEventError{event}
|
||||
}
|
||||
|
||||
e := &Event{f, event, f.current, dst, nil, args, false, false}
|
||||
|
||||
err := f.beforeEventCallbacks(e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if f.current == dst {
|
||||
f.afterEventCallbacks(e)
|
||||
return NoTransitionError{e.Err}
|
||||
}
|
||||
|
||||
// Setup the transition, call it later.
|
||||
f.transition = func() {
|
||||
f.stateMu.Lock()
|
||||
f.current = dst
|
||||
f.stateMu.Unlock()
|
||||
|
||||
f.enterStateCallbacks(e)
|
||||
f.afterEventCallbacks(e)
|
||||
}
|
||||
|
||||
if err = f.leaveStateCallbacks(e); err != nil {
|
||||
if _, ok := err.(CanceledError); ok {
|
||||
f.transition = nil
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Perform the rest of the transition, if not asynchronous.
|
||||
f.stateMu.RUnlock()
|
||||
err = f.doTransition()
|
||||
f.stateMu.RLock()
|
||||
if err != nil {
|
||||
return InternalError{}
|
||||
}
|
||||
|
||||
return e.Err
|
||||
}
|
||||
|
||||
// Transition wraps transitioner.transition.
|
||||
func (f *FSM) Transition() error {
|
||||
f.eventMu.Lock()
|
||||
defer f.eventMu.Unlock()
|
||||
return f.doTransition()
|
||||
}
|
||||
|
||||
// doTransition wraps transitioner.transition.
|
||||
func (f *FSM) doTransition() error {
|
||||
return f.transitionerObj.transition(f)
|
||||
}
|
||||
|
||||
// transitionerStruct is the default implementation of the transitioner
|
||||
// interface. Other implementations can be swapped in for testing.
|
||||
type transitionerStruct struct{}
|
||||
|
||||
// Transition completes an asynchronous state change.
|
||||
//
|
||||
// The callback for leave_<STATE> must previously have called Async on its
|
||||
// event to have initiated an asynchronous state transition.
|
||||
func (t transitionerStruct) transition(f *FSM) error {
|
||||
if f.transition == nil {
|
||||
return NotInTransitionError{}
|
||||
}
|
||||
f.transition()
|
||||
f.transition = nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// beforeEventCallbacks calls the before_ callbacks, first the named then the
|
||||
// general version.
|
||||
func (f *FSM) beforeEventCallbacks(e *Event) error {
|
||||
if fn, ok := f.callbacks[cKey{e.Event, callbackBeforeEvent}]; ok {
|
||||
fn(e)
|
||||
if e.canceled {
|
||||
return CanceledError{e.Err}
|
||||
}
|
||||
}
|
||||
if fn, ok := f.callbacks[cKey{"", callbackBeforeEvent}]; ok {
|
||||
fn(e)
|
||||
if e.canceled {
|
||||
return CanceledError{e.Err}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// leaveStateCallbacks calls the leave_ callbacks, first the named then the
|
||||
// general version.
|
||||
func (f *FSM) leaveStateCallbacks(e *Event) error {
|
||||
if fn, ok := f.callbacks[cKey{f.current, callbackLeaveState}]; ok {
|
||||
fn(e)
|
||||
if e.canceled {
|
||||
return CanceledError{e.Err}
|
||||
} else if e.async {
|
||||
return AsyncError{e.Err}
|
||||
}
|
||||
}
|
||||
if fn, ok := f.callbacks[cKey{"", callbackLeaveState}]; ok {
|
||||
fn(e)
|
||||
if e.canceled {
|
||||
return CanceledError{e.Err}
|
||||
} else if e.async {
|
||||
return AsyncError{e.Err}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// enterStateCallbacks calls the enter_ callbacks, first the named then the
|
||||
// general version.
|
||||
func (f *FSM) enterStateCallbacks(e *Event) {
|
||||
if fn, ok := f.callbacks[cKey{f.current, callbackEnterState}]; ok {
|
||||
fn(e)
|
||||
}
|
||||
if fn, ok := f.callbacks[cKey{"", callbackEnterState}]; ok {
|
||||
fn(e)
|
||||
}
|
||||
}
|
||||
|
||||
// afterEventCallbacks calls the after_ callbacks, first the named then the
|
||||
// general version.
|
||||
func (f *FSM) afterEventCallbacks(e *Event) {
|
||||
if fn, ok := f.callbacks[cKey{e.Event, callbackAfterEvent}]; ok {
|
||||
fn(e)
|
||||
}
|
||||
if fn, ok := f.callbacks[cKey{"", callbackAfterEvent}]; ok {
|
||||
fn(e)
|
||||
}
|
||||
}
|
||||
|
||||
const (
|
||||
callbackNone int = iota
|
||||
callbackBeforeEvent
|
||||
callbackLeaveState
|
||||
callbackEnterState
|
||||
callbackAfterEvent
|
||||
)
|
||||
|
||||
// cKey is a struct key used for keeping the callbacks mapped to a target.
|
||||
type cKey struct {
|
||||
// target is either the name of a state or an event depending on which
|
||||
// callback type the key refers to. It can also be "" for a non-targeted
|
||||
// callback like before_event.
|
||||
target string
|
||||
|
||||
// callbackType is the situation when the callback will be run.
|
||||
callbackType int
|
||||
}
|
||||
|
||||
// eKey is a struct key used for storing the transition map.
|
||||
type eKey struct {
|
||||
// event is the name of the event that the key refers to.
|
||||
event string
|
||||
|
||||
// src is the source from where the event can transition.
|
||||
src string
|
||||
}
|
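Putting the pieces together, here is a minimal sketch of the construction pattern the NewFSM documentation above describes: a transition table plus a named enter_ callback and a catch-all after_event callback. The state and event names are examples, not anything Traefik configures.

package main

import (
	"fmt"

	"github.com/looplab/fsm"
)

func main() {
	machine := fsm.NewFSM(
		"closed", // initial state
		fsm.Events{
			{Name: "open", Src: []string{"closed"}, Dst: "open"},
			{Name: "close", Src: []string{"open"}, Dst: "closed"},
		},
		fsm.Callbacks{
			// named form: fires after entering the "open" state
			"enter_open": func(e *fsm.Event) { fmt.Println("now open") },
			// non-targeted form: fires after every event
			"after_event": func(e *fsm.Event) {
				fmt.Println("handled", e.Event, "->", e.Dst)
			},
		},
	)

	fmt.Println(machine.Current()) // "closed"
	if err := machine.Event("open"); err != nil {
		fmt.Println(err)
	}
	fmt.Println(machine.Current()) // "open"
}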
45
vendor/github.com/looplab/fsm/utils.go
generated
vendored
Normal file
45
vendor/github.com/looplab/fsm/utils.go
generated
vendored
Normal file
@ -0,0 +1,45 @@
|
||||
package fsm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Visualize outputs a visualization of a FSM in Graphviz format.
|
||||
func Visualize(fsm *FSM) string {
|
||||
var buf bytes.Buffer
|
||||
|
||||
states := make(map[string]int)
|
||||
|
||||
buf.WriteString(fmt.Sprintf(`digraph fsm {`))
|
||||
buf.WriteString("\n")
|
||||
|
||||
// make sure the initial state is at top
|
||||
for k, v := range fsm.transitions {
|
||||
if k.src == fsm.current {
|
||||
states[k.src]++
|
||||
states[v]++
|
||||
buf.WriteString(fmt.Sprintf(` "%s" -> "%s" [ label = "%s" ];`, k.src, v, k.event))
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
for k, v := range fsm.transitions {
|
||||
if k.src != fsm.current {
|
||||
states[k.src]++
|
||||
states[v]++
|
||||
buf.WriteString(fmt.Sprintf(` "%s" -> "%s" [ label = "%s" ];`, k.src, v, k.event))
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteString("\n")
|
||||
|
||||
for k := range states {
|
||||
buf.WriteString(fmt.Sprintf(` "%s";`, k))
|
||||
buf.WriteString("\n")
|
||||
}
|
||||
buf.WriteString(fmt.Sprintln("}"))
|
||||
|
||||
return buf.String()
|
||||
}
|
21
vendor/github.com/opentracing/basictracer-go/LICENSE
generated
vendored
Normal file
21
vendor/github.com/opentracing/basictracer-go/LICENSE
generated
vendored
Normal file
@ -0,0 +1,21 @@
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2016 The OpenTracing Authors
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
42
vendor/github.com/opentracing/basictracer-go/context.go
generated
vendored
Normal file
42
vendor/github.com/opentracing/basictracer-go/context.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
|
||||
package basictracer
|
||||
|
||||
// SpanContext holds the basic Span metadata.
|
||||
type SpanContext struct {
|
||||
// A probabilistically unique identifier for a [multi-span] trace.
|
||||
TraceID uint64
|
||||
|
||||
// A probabilistically unique identifier for a span.
|
||||
SpanID uint64
|
||||
|
||||
// Whether the trace is sampled.
|
||||
Sampled bool
|
||||
|
||||
// The span's associated baggage.
|
||||
Baggage map[string]string // initialized on first use
|
||||
}
|
||||
|
||||
// ForeachBaggageItem belongs to the opentracing.SpanContext interface
|
||||
func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
|
||||
for k, v := range c.Baggage {
|
||||
if !handler(k, v) {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WithBaggageItem returns an entirely new basictracer SpanContext with the
|
||||
// given key:value baggage pair set.
|
||||
func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
|
||||
var newBaggage map[string]string
|
||||
if c.Baggage == nil {
|
||||
newBaggage = map[string]string{key: val}
|
||||
} else {
|
||||
newBaggage = make(map[string]string, len(c.Baggage)+1)
|
||||
for k, v := range c.Baggage {
|
||||
newBaggage[k] = v
|
||||
}
|
||||
newBaggage[key] = val
|
||||
}
|
||||
// Use positional parameters so the compiler will help catch new fields.
|
||||
return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage}
|
||||
}
|
78
vendor/github.com/opentracing/basictracer-go/debug.go
generated
vendored
Normal file
78
vendor/github.com/opentracing/basictracer-go/debug.go
generated
vendored
Normal file
@ -0,0 +1,78 @@
|
||||
package basictracer
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"sync"
|
||||
)
|
||||
|
||||
const debugGoroutineIDTag = "_initial_goroutine"
|
||||
|
||||
type errAssertionFailed struct {
|
||||
span *spanImpl
|
||||
msg string
|
||||
}
|
||||
|
||||
// Error implements the error interface.
|
||||
func (err *errAssertionFailed) Error() string {
|
||||
return fmt.Sprintf("%s:\n%+v", err.msg, err.span)
|
||||
}
|
||||
|
||||
func (s *spanImpl) Lock() {
|
||||
s.Mutex.Lock()
|
||||
s.maybeAssertSanityLocked()
|
||||
}
|
||||
|
||||
func (s *spanImpl) maybeAssertSanityLocked() {
|
||||
if s.tracer == nil {
|
||||
s.Mutex.Unlock()
|
||||
panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"})
|
||||
}
|
||||
if s.tracer.options.DebugAssertSingleGoroutine {
|
||||
startID := curGoroutineID()
|
||||
curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64)
|
||||
if !ok {
|
||||
// This is likely invoked in the context of the SetTag which sets
|
||||
// debugGoroutineIDTag.
|
||||
return
|
||||
}
|
||||
if startID != curID {
|
||||
s.Mutex.Unlock()
|
||||
panic(&errAssertionFailed{
|
||||
span: s,
|
||||
msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var goroutineSpace = []byte("goroutine ")
|
||||
var littleBuf = sync.Pool{
|
||||
New: func() interface{} {
|
||||
buf := make([]byte, 64)
|
||||
return &buf
|
||||
},
|
||||
}
|
||||
|
||||
// Credit to @bradfitz:
|
||||
// https://github.com/golang/net/blob/master/http2/gotrack.go#L51
|
||||
func curGoroutineID() uint64 {
|
||||
bp := littleBuf.Get().(*[]byte)
|
||||
defer littleBuf.Put(bp)
|
||||
b := *bp
|
||||
b = b[:runtime.Stack(b, false)]
|
||||
// Parse the 4707 out of "goroutine 4707 ["
|
||||
b = bytes.TrimPrefix(b, goroutineSpace)
|
||||
i := bytes.IndexByte(b, ' ')
|
||||
if i < 0 {
|
||||
panic(fmt.Sprintf("No space found in %q", b))
|
||||
}
|
||||
b = b[:i]
|
||||
n, err := strconv.ParseUint(string(b), 10, 64)
|
||||
if err != nil {
|
||||
panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
|
||||
}
|
||||
return n
|
||||
}
|
62
vendor/github.com/opentracing/basictracer-go/event.go
generated
vendored
Normal file
62
vendor/github.com/opentracing/basictracer-go/event.go
generated
vendored
Normal file
@ -0,0 +1,62 @@
|
||||
package basictracer
|
||||
|
||||
import "github.com/opentracing/opentracing-go"
|
||||
|
||||
// A SpanEvent is emitted when a mutating command is called on a Span.
|
||||
type SpanEvent interface{}
|
||||
|
||||
// EventCreate is emitted when a Span is created.
|
||||
type EventCreate struct{ OperationName string }
|
||||
|
||||
// EventTag is received when SetTag is called.
|
||||
type EventTag struct {
|
||||
Key string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// EventBaggage is received when SetBaggageItem is called.
|
||||
type EventBaggage struct {
|
||||
Key, Value string
|
||||
}
|
||||
|
||||
// EventLogFields is received when LogFields or LogKV is called.
|
||||
type EventLogFields opentracing.LogRecord
|
||||
|
||||
// EventLog is received when Log (or one of its derivatives) is called.
|
||||
//
|
||||
// DEPRECATED
|
||||
type EventLog opentracing.LogData
|
||||
|
||||
// EventFinish is received when Finish is called.
|
||||
type EventFinish RawSpan
|
||||
|
||||
func (s *spanImpl) onCreate(opName string) {
|
||||
if s.event != nil {
|
||||
s.event(EventCreate{OperationName: opName})
|
||||
}
|
||||
}
|
||||
func (s *spanImpl) onTag(key string, value interface{}) {
|
||||
if s.event != nil {
|
||||
s.event(EventTag{Key: key, Value: value})
|
||||
}
|
||||
}
|
||||
func (s *spanImpl) onLog(ld opentracing.LogData) {
|
||||
if s.event != nil {
|
||||
s.event(EventLog(ld))
|
||||
}
|
||||
}
|
||||
func (s *spanImpl) onLogFields(lr opentracing.LogRecord) {
|
||||
if s.event != nil {
|
||||
s.event(EventLogFields(lr))
|
||||
}
|
||||
}
|
||||
func (s *spanImpl) onBaggage(key, value string) {
|
||||
if s.event != nil {
|
||||
s.event(EventBaggage{Key: key, Value: value})
|
||||
}
|
||||
}
|
||||
func (s *spanImpl) onFinish(sp RawSpan) {
|
||||
if s.event != nil {
|
||||
s.event(EventFinish(sp))
|
||||
}
|
||||
}
|
61
vendor/github.com/opentracing/basictracer-go/propagation.go
generated
vendored
Normal file
61
vendor/github.com/opentracing/basictracer-go/propagation.go
generated
vendored
Normal file
@ -0,0 +1,61 @@
|
||||
package basictracer
|
||||
|
||||
import opentracing "github.com/opentracing/opentracing-go"
|
||||
|
||||
type accessorPropagator struct {
|
||||
tracer *tracerImpl
|
||||
}
|
||||
|
||||
// DelegatingCarrier is a flexible carrier interface which can be implemented
|
||||
// by types which have a means of storing the trace metadata and already know
|
||||
// how to serialize themselves (for example, protocol buffers).
|
||||
type DelegatingCarrier interface {
|
||||
SetState(traceID, spanID uint64, sampled bool)
|
||||
State() (traceID, spanID uint64, sampled bool)
|
||||
SetBaggageItem(key, value string)
|
||||
GetBaggage(func(key, value string))
|
||||
}
|
||||
|
||||
func (p *accessorPropagator) Inject(
|
||||
spanContext opentracing.SpanContext,
|
||||
carrier interface{},
|
||||
) error {
|
||||
dc, ok := carrier.(DelegatingCarrier)
|
||||
if !ok || dc == nil {
|
||||
return opentracing.ErrInvalidCarrier
|
||||
}
|
||||
sc, ok := spanContext.(SpanContext)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidSpanContext
|
||||
}
|
||||
dc.SetState(sc.TraceID, sc.SpanID, sc.Sampled)
|
||||
for k, v := range sc.Baggage {
|
||||
dc.SetBaggageItem(k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *accessorPropagator) Extract(
|
||||
carrier interface{},
|
||||
) (opentracing.SpanContext, error) {
|
||||
dc, ok := carrier.(DelegatingCarrier)
|
||||
if !ok || dc == nil {
|
||||
return nil, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
traceID, spanID, sampled := dc.State()
|
||||
sc := SpanContext{
|
||||
TraceID: traceID,
|
||||
SpanID: spanID,
|
||||
Sampled: sampled,
|
||||
Baggage: nil,
|
||||
}
|
||||
dc.GetBaggage(func(k, v string) {
|
||||
if sc.Baggage == nil {
|
||||
sc.Baggage = map[string]string{}
|
||||
}
|
||||
sc.Baggage[k] = v
|
||||
})
|
||||
|
||||
return sc, nil
|
||||
}
|
180
vendor/github.com/opentracing/basictracer-go/propagation_ot.go
generated
vendored
Normal file
180
vendor/github.com/opentracing/basictracer-go/propagation_ot.go
generated
vendored
Normal file
@ -0,0 +1,180 @@
|
||||
package basictracer
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/gogo/protobuf/proto"
|
||||
"github.com/opentracing/basictracer-go/wire"
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
type textMapPropagator struct {
|
||||
tracer *tracerImpl
|
||||
}
|
||||
type binaryPropagator struct {
|
||||
tracer *tracerImpl
|
||||
}
|
||||
|
||||
const (
|
||||
prefixTracerState = "ot-tracer-"
|
||||
prefixBaggage = "ot-baggage-"
|
||||
|
||||
tracerStateFieldCount = 3
|
||||
fieldNameTraceID = prefixTracerState + "traceid"
|
||||
fieldNameSpanID = prefixTracerState + "spanid"
|
||||
fieldNameSampled = prefixTracerState + "sampled"
|
||||
)
|
||||
|
||||
func (p *textMapPropagator) Inject(
|
||||
spanContext opentracing.SpanContext,
|
||||
opaqueCarrier interface{},
|
||||
) error {
|
||||
sc, ok := spanContext.(SpanContext)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidSpanContext
|
||||
}
|
||||
carrier, ok := opaqueCarrier.(opentracing.TextMapWriter)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidCarrier
|
||||
}
|
||||
carrier.Set(fieldNameTraceID, strconv.FormatUint(sc.TraceID, 16))
|
||||
carrier.Set(fieldNameSpanID, strconv.FormatUint(sc.SpanID, 16))
|
||||
carrier.Set(fieldNameSampled, strconv.FormatBool(sc.Sampled))
|
||||
|
||||
for k, v := range sc.Baggage {
|
||||
carrier.Set(prefixBaggage+k, v)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (p *textMapPropagator) Extract(
|
||||
opaqueCarrier interface{},
|
||||
) (opentracing.SpanContext, error) {
|
||||
carrier, ok := opaqueCarrier.(opentracing.TextMapReader)
|
||||
if !ok {
|
||||
return nil, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
requiredFieldCount := 0
|
||||
var traceID, spanID uint64
|
||||
var sampled bool
|
||||
var err error
|
||||
decodedBaggage := make(map[string]string)
|
||||
err = carrier.ForeachKey(func(k, v string) error {
|
||||
switch strings.ToLower(k) {
|
||||
case fieldNameTraceID:
|
||||
traceID, err = strconv.ParseUint(v, 16, 64)
|
||||
if err != nil {
|
||||
return opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
case fieldNameSpanID:
|
||||
spanID, err = strconv.ParseUint(v, 16, 64)
|
||||
if err != nil {
|
||||
return opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
case fieldNameSampled:
|
||||
sampled, err = strconv.ParseBool(v)
|
||||
if err != nil {
|
||||
return opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
default:
|
||||
lowercaseK := strings.ToLower(k)
|
||||
if strings.HasPrefix(lowercaseK, prefixBaggage) {
|
||||
decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v
|
||||
}
|
||||
// Balance off the requiredFieldCount++ just below...
|
||||
requiredFieldCount--
|
||||
}
|
||||
requiredFieldCount++
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if requiredFieldCount < tracerStateFieldCount {
|
||||
if requiredFieldCount == 0 {
|
||||
return nil, opentracing.ErrSpanContextNotFound
|
||||
}
|
||||
return nil, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
|
||||
return SpanContext{
|
||||
TraceID: traceID,
|
||||
SpanID: spanID,
|
||||
Sampled: sampled,
|
||||
Baggage: decodedBaggage,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *binaryPropagator) Inject(
|
||||
spanContext opentracing.SpanContext,
|
||||
opaqueCarrier interface{},
|
||||
) error {
|
||||
sc, ok := spanContext.(SpanContext)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidSpanContext
|
||||
}
|
||||
carrier, ok := opaqueCarrier.(io.Writer)
|
||||
if !ok {
|
||||
return opentracing.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
state := wire.TracerState{}
|
||||
state.TraceId = sc.TraceID
|
||||
state.SpanId = sc.SpanID
|
||||
state.Sampled = sc.Sampled
|
||||
state.BaggageItems = sc.Baggage
|
||||
|
||||
b, err := proto.Marshal(&state)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Write the length of the marshalled binary to the writer.
|
||||
length := uint32(len(b))
|
||||
if err := binary.Write(carrier, binary.BigEndian, &length); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = carrier.Write(b)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *binaryPropagator) Extract(
|
||||
opaqueCarrier interface{},
|
||||
) (opentracing.SpanContext, error) {
|
||||
carrier, ok := opaqueCarrier.(io.Reader)
|
||||
if !ok {
|
||||
return nil, opentracing.ErrInvalidCarrier
|
||||
}
|
||||
|
||||
// Read the length of marshalled binary. io.ReadAll isn't that performant
|
||||
// since it keeps resizing the underlying buffer as it encounters more bytes
|
||||
// to read. By reading the length, we can allocate a fixed sized buf and read
|
||||
// the exact amount of bytes into it.
|
||||
var length uint32
|
||||
if err := binary.Read(carrier, binary.BigEndian, &length); err != nil {
|
||||
return nil, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
buf := make([]byte, length)
|
||||
if n, err := carrier.Read(buf); err != nil {
|
||||
if n > 0 {
|
||||
return nil, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
return nil, opentracing.ErrSpanContextNotFound
|
||||
}
|
||||
|
||||
ctx := wire.TracerState{}
|
||||
if err := proto.Unmarshal(buf, &ctx); err != nil {
|
||||
return nil, opentracing.ErrSpanContextCorrupted
|
||||
}
|
||||
|
||||
return SpanContext{
|
||||
TraceID: ctx.TraceId,
|
||||
SpanID: ctx.SpanId,
|
||||
Sampled: ctx.Sampled,
|
||||
Baggage: ctx.BaggageItems,
|
||||
}, nil
|
||||
}
|
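These two propagators are reached through the standard opentracing Inject and Extract entry points (see tracer.go later in this commit). The stand-alone sketch below is illustrative and not part of the vendored code; it shows what the text-map carrier holds after an Inject and how a binary round trip through a bytes.Buffer looks. The operation names are made up.

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "bytes"
    "fmt"

    basictracer "github.com/opentracing/basictracer-go"
    opentracing "github.com/opentracing/opentracing-go"
)

func main() {
    tracer := basictracer.New(basictracer.NewInMemoryRecorder())
    parent := tracer.StartSpan("parent-operation")

    // TextMap: the carrier ends up holding ot-tracer-traceid and ot-tracer-spanid
    // (hex encoded), ot-tracer-sampled, plus one ot-baggage-* key per baggage item.
    textCarrier := opentracing.TextMapCarrier{}
    if err := tracer.Inject(parent.Context(), opentracing.TextMap, textCarrier); err != nil {
        panic(err)
    }
    for k, v := range textCarrier {
        fmt.Printf("%s=%s\n", k, v)
    }
    remoteCtx, err := tracer.Extract(opentracing.TextMap, textCarrier)
    if err != nil {
        panic(err)
    }
    child := tracer.StartSpan("child-operation", opentracing.ChildOf(remoteCtx))
    child.Finish()

    // Binary: a length-prefixed protobuf TracerState written to an io.Writer.
    var buf bytes.Buffer
    if err := tracer.Inject(parent.Context(), opentracing.Binary, &buf); err != nil {
        panic(err)
    }
    if _, err := tracer.Extract(opentracing.Binary, &buf); err != nil {
        panic(err)
    }
    parent.Finish()
}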
34
vendor/github.com/opentracing/basictracer-go/raw.go
generated
vendored
Normal file
@ -0,0 +1,34 @@
package basictracer

import (
    "time"

    opentracing "github.com/opentracing/opentracing-go"
)

// RawSpan encapsulates all state associated with a (finished) Span.
type RawSpan struct {
    // Those recording the RawSpan should also record the contents of its
    // SpanContext.
    Context SpanContext

    // The SpanID of this SpanContext's first intra-trace reference (i.e.,
    // "parent"), or 0 if there is no parent.
    ParentSpanID uint64

    // The name of the "operation" this span is an instance of. (Called a "span
    // name" in some implementations)
    Operation string

    // We store <start, duration> rather than <start, end> so that only
    // one of the timestamps has global clock uncertainty issues.
    Start    time.Time
    Duration time.Duration

    // Essentially an extension mechanism. Can be used for many purposes,
    // not to be enumerated here.
    Tags opentracing.Tags

    // The span's "microlog".
    Logs []opentracing.LogRecord
}
60
vendor/github.com/opentracing/basictracer-go/recorder.go
generated
vendored
Normal file
@ -0,0 +1,60 @@
package basictracer

import "sync"

// A SpanRecorder handles all of the `RawSpan` data generated via an
// associated `Tracer` (see `NewStandardTracer`) instance. It also names
// the containing process and provides access to a straightforward tag map.
type SpanRecorder interface {
    // Implementations must determine whether and where to store `span`.
    RecordSpan(span RawSpan)
}

// InMemorySpanRecorder is a simple thread-safe implementation of
// SpanRecorder that stores all reported spans in memory, accessible
// via reporter.GetSpans(). It is primarily intended for testing purposes.
type InMemorySpanRecorder struct {
    sync.RWMutex
    spans []RawSpan
}

// NewInMemoryRecorder creates new InMemorySpanRecorder
func NewInMemoryRecorder() *InMemorySpanRecorder {
    return new(InMemorySpanRecorder)
}

// RecordSpan implements the respective method of SpanRecorder.
func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) {
    r.Lock()
    defer r.Unlock()
    r.spans = append(r.spans, span)
}

// GetSpans returns a copy of the array of spans accumulated so far.
func (r *InMemorySpanRecorder) GetSpans() []RawSpan {
    r.RLock()
    defer r.RUnlock()
    spans := make([]RawSpan, len(r.spans))
    copy(spans, r.spans)
    return spans
}

// GetSampledSpans returns a slice of spans accumulated so far which were sampled.
func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan {
    r.RLock()
    defer r.RUnlock()
    spans := make([]RawSpan, 0, len(r.spans))
    for _, span := range r.spans {
        if span.Context.Sampled {
            spans = append(spans, span)
        }
    }
    return spans
}

// Reset clears the internal array of spans.
func (r *InMemorySpanRecorder) Reset() {
    r.Lock()
    defer r.Unlock()
    r.spans = nil
}
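As its doc comment says, InMemorySpanRecorder is primarily intended for testing. The stand-alone sketch below is illustrative and not part of the vendored code: it forces sampling so every span is recorded, then reads the finished RawSpans back. The operation and tag names are made up.

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "fmt"

    basictracer "github.com/opentracing/basictracer-go"
)

func main() {
    recorder := basictracer.NewInMemoryRecorder()

    opts := basictracer.DefaultOptions()
    opts.ShouldSample = func(traceID uint64) bool { return true } // sample everything
    opts.Recorder = recorder
    tracer := basictracer.NewWithOptions(opts)

    span := tracer.StartSpan("store-item")
    span.SetTag("db.instance", "example")
    span.Finish()

    // Every finished span was handed to the recorder as a RawSpan.
    for _, raw := range recorder.GetSpans() {
        fmt.Printf("%s took %s (sampled=%v, tags=%v)\n",
            raw.Operation, raw.Duration, raw.Context.Sampled, raw.Tags)
    }
}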
274
vendor/github.com/opentracing/basictracer-go/span.go
generated
vendored
Normal file
@ -0,0 +1,274 @@
|
||||
package basictracer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
"github.com/opentracing/opentracing-go/ext"
|
||||
"github.com/opentracing/opentracing-go/log"
|
||||
)
|
||||
|
||||
// Span provides access to the essential details of the span, for use
|
||||
// by basictracer consumers. These methods may only be called prior
|
||||
// to (*opentracing.Span).Finish().
|
||||
type Span interface {
|
||||
opentracing.Span
|
||||
|
||||
// Operation names the work done by this span instance
|
||||
Operation() string
|
||||
|
||||
// Start indicates when the span began
|
||||
Start() time.Time
|
||||
}
|
||||
|
||||
// Implements the `Span` interface. Created via tracerImpl (see
|
||||
// `basictracer.New()`).
|
||||
type spanImpl struct {
|
||||
tracer *tracerImpl
|
||||
event func(SpanEvent)
|
||||
sync.Mutex // protects the fields below
|
||||
raw RawSpan
|
||||
// The number of logs dropped because of MaxLogsPerSpan.
|
||||
numDroppedLogs int
|
||||
}
|
||||
|
||||
var spanPool = &sync.Pool{New: func() interface{} {
|
||||
return &spanImpl{}
|
||||
}}
|
||||
|
||||
func (s *spanImpl) reset() {
|
||||
s.tracer, s.event = nil, nil
|
||||
// Note: Would like to do the following, but then the consumer of RawSpan
|
||||
// (the recorder) needs to make sure that they're not holding on to the
|
||||
// baggage or logs when they return (i.e. they need to copy if they care):
|
||||
//
|
||||
// logs, baggage := s.raw.Logs[:0], s.raw.Baggage
|
||||
// for k := range baggage {
|
||||
// delete(baggage, k)
|
||||
// }
|
||||
// s.raw.Logs, s.raw.Baggage = logs, baggage
|
||||
//
|
||||
// That's likely too much to ask for. But there is some magic we should
|
||||
// be able to do with `runtime.SetFinalizer` to reclaim that memory into
|
||||
// a buffer pool when GC considers them unreachable, which should ease
|
||||
// some of the load. Hard to say how quickly that would be in practice
|
||||
// though.
|
||||
s.raw = RawSpan{
|
||||
Context: SpanContext{},
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spanImpl) SetOperationName(operationName string) opentracing.Span {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.raw.Operation = operationName
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *spanImpl) trim() bool {
|
||||
return !s.raw.Context.Sampled && s.tracer.options.TrimUnsampledSpans
|
||||
}
|
||||
|
||||
func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span {
|
||||
defer s.onTag(key, value)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if key == string(ext.SamplingPriority) {
|
||||
if v, ok := value.(uint16); ok {
|
||||
s.raw.Context.Sampled = v != 0
|
||||
return s
|
||||
}
|
||||
}
|
||||
if s.trim() {
|
||||
return s
|
||||
}
|
||||
|
||||
if s.raw.Tags == nil {
|
||||
s.raw.Tags = opentracing.Tags{}
|
||||
}
|
||||
s.raw.Tags[key] = value
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *spanImpl) LogKV(keyValues ...interface{}) {
|
||||
fields, err := log.InterleavedKVToFields(keyValues...)
|
||||
if err != nil {
|
||||
s.LogFields(log.Error(err), log.String("function", "LogKV"))
|
||||
return
|
||||
}
|
||||
s.LogFields(fields...)
|
||||
}
|
||||
|
||||
func (s *spanImpl) appendLog(lr opentracing.LogRecord) {
|
||||
maxLogs := s.tracer.options.MaxLogsPerSpan
|
||||
if maxLogs == 0 || len(s.raw.Logs) < maxLogs {
|
||||
s.raw.Logs = append(s.raw.Logs, lr)
|
||||
return
|
||||
}
|
||||
|
||||
// We have too many logs. We don't touch the first numOld logs; we treat the
|
||||
// rest as a circular buffer and overwrite the oldest log among those.
|
||||
numOld := (maxLogs - 1) / 2
|
||||
numNew := maxLogs - numOld
|
||||
s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr
|
||||
s.numDroppedLogs++
|
||||
}
|
||||
|
||||
func (s *spanImpl) LogFields(fields ...log.Field) {
|
||||
lr := opentracing.LogRecord{
|
||||
Fields: fields,
|
||||
}
|
||||
defer s.onLogFields(lr)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.trim() || s.tracer.options.DropAllLogs {
|
||||
return
|
||||
}
|
||||
if lr.Timestamp.IsZero() {
|
||||
lr.Timestamp = time.Now()
|
||||
}
|
||||
s.appendLog(lr)
|
||||
}
|
||||
|
||||
func (s *spanImpl) LogEvent(event string) {
|
||||
s.Log(opentracing.LogData{
|
||||
Event: event,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) {
|
||||
s.Log(opentracing.LogData{
|
||||
Event: event,
|
||||
Payload: payload,
|
||||
})
|
||||
}
|
||||
|
||||
func (s *spanImpl) Log(ld opentracing.LogData) {
|
||||
defer s.onLog(ld)
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
if s.trim() || s.tracer.options.DropAllLogs {
|
||||
return
|
||||
}
|
||||
|
||||
if ld.Timestamp.IsZero() {
|
||||
ld.Timestamp = time.Now()
|
||||
}
|
||||
|
||||
s.appendLog(ld.ToLogRecord())
|
||||
}
|
||||
|
||||
func (s *spanImpl) Finish() {
|
||||
s.FinishWithOptions(opentracing.FinishOptions{})
|
||||
}
|
||||
|
||||
// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move at
|
||||
// the end (i.e. pos circular left shifts).
|
||||
func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
|
||||
// This algorithm is described in:
|
||||
// http://www.cplusplus.com/reference/algorithm/rotate
|
||||
for first, middle, next := 0, pos, pos; first != middle; {
|
||||
buf[first], buf[next] = buf[next], buf[first]
|
||||
first++
|
||||
next++
|
||||
if next == len(buf) {
|
||||
next = middle
|
||||
} else if first == middle {
|
||||
middle = next
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
|
||||
finishTime := opts.FinishTime
|
||||
if finishTime.IsZero() {
|
||||
finishTime = time.Now()
|
||||
}
|
||||
duration := finishTime.Sub(s.raw.Start)
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
|
||||
for _, lr := range opts.LogRecords {
|
||||
s.appendLog(lr)
|
||||
}
|
||||
for _, ld := range opts.BulkLogData {
|
||||
s.appendLog(ld.ToLogRecord())
|
||||
}
|
||||
|
||||
if s.numDroppedLogs > 0 {
|
||||
// We dropped some log events, which means that we used part of Logs as a
|
||||
// circular buffer (see appendLog). De-circularize it.
|
||||
numOld := (len(s.raw.Logs) - 1) / 2
|
||||
numNew := len(s.raw.Logs) - numOld
|
||||
rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew)
|
||||
|
||||
// Replace the log in the middle (the oldest "new" log) with information
|
||||
// about the dropped logs. This means that we are effectively dropping one
|
||||
// more "new" log.
|
||||
numDropped := s.numDroppedLogs + 1
|
||||
s.raw.Logs[numOld] = opentracing.LogRecord{
|
||||
// Keep the timestamp of the last dropped event.
|
||||
Timestamp: s.raw.Logs[numOld].Timestamp,
|
||||
Fields: []log.Field{
|
||||
log.String("event", "dropped Span logs"),
|
||||
log.Int("dropped_log_count", numDropped),
|
||||
log.String("component", "basictracer"),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
s.raw.Duration = duration
|
||||
|
||||
s.onFinish(s.raw)
|
||||
s.tracer.options.Recorder.RecordSpan(s.raw)
|
||||
|
||||
// Last chance to get options before the span is possibly reset.
|
||||
poolEnabled := s.tracer.options.EnableSpanPool
|
||||
if s.tracer.options.DebugAssertUseAfterFinish {
|
||||
// This makes it much more likely to catch a panic on any subsequent
|
||||
// operation since s.tracer is accessed on every call to `Lock`.
|
||||
// We don't call `reset()` here to preserve the logs in the Span
|
||||
// which are printed when the assertion triggers.
|
||||
s.tracer = nil
|
||||
}
|
||||
|
||||
if poolEnabled {
|
||||
spanPool.Put(s)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *spanImpl) Tracer() opentracing.Tracer {
|
||||
return s.tracer
|
||||
}
|
||||
|
||||
func (s *spanImpl) Context() opentracing.SpanContext {
|
||||
return s.raw.Context
|
||||
}
|
||||
|
||||
func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span {
|
||||
s.onBaggage(key, val)
|
||||
if s.trim() {
|
||||
return s
|
||||
}
|
||||
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
s.raw.Context = s.raw.Context.WithBaggageItem(key, val)
|
||||
return s
|
||||
}
|
||||
|
||||
func (s *spanImpl) BaggageItem(key string) string {
|
||||
s.Lock()
|
||||
defer s.Unlock()
|
||||
return s.raw.Context.Baggage[key]
|
||||
}
|
||||
|
||||
func (s *spanImpl) Operation() string {
|
||||
return s.raw.Operation
|
||||
}
|
||||
|
||||
func (s *spanImpl) Start() time.Time {
|
||||
return s.raw.Start
|
||||
}
|
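appendLog and FinishWithOptions above implement the MaxLogsPerSpan cap: with a cap of N, the oldest (N-1)/2 records are kept verbatim, the remaining slots are reused as a circular buffer for the newest records, and at Finish one slot is rewritten into a "dropped Span logs" record carrying dropped_log_count. A stand-alone sketch with a cap of 5 (illustrative, not part of the vendored code):

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "fmt"

    basictracer "github.com/opentracing/basictracer-go"
)

func main() {
    recorder := basictracer.NewInMemoryRecorder()

    opts := basictracer.DefaultOptions()
    opts.ShouldSample = func(uint64) bool { return true }
    opts.MaxLogsPerSpan = 5 // numOld = (5-1)/2 = 2 preserved slots, numNew = 3 ring slots
    opts.Recorder = recorder
    tracer := basictracer.NewWithOptions(opts)

    span := tracer.StartSpan("noisy-loop")
    for i := 0; i < 20; i++ {
        span.LogKV("iteration", i)
    }
    span.Finish()

    // The finished span carries exactly 5 log records: the 2 oldest, one
    // "dropped Span logs" record with dropped_log_count, and the newest records.
    raw := recorder.GetSpans()[0]
    fmt.Println("kept logs:", len(raw.Logs))
    for _, lr := range raw.Logs {
        fmt.Println(lr.Fields)
    }
}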
262
vendor/github.com/opentracing/basictracer-go/tracer.go
generated
vendored
Normal file
@ -0,0 +1,262 @@
|
||||
package basictracer
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
opentracing "github.com/opentracing/opentracing-go"
|
||||
)
|
||||
|
||||
// Tracer extends the opentracing.Tracer interface with methods to
|
||||
// probe implementation state, for use by basictracer consumers.
|
||||
type Tracer interface {
|
||||
opentracing.Tracer
|
||||
|
||||
// Options gets the Options used in New() or NewWithOptions().
|
||||
Options() Options
|
||||
}
|
||||
|
||||
// Options allows creating a customized Tracer via NewWithOptions. The object
|
||||
// must not be updated when there is an active tracer using it.
|
||||
type Options struct {
|
||||
// ShouldSample is a function which is called when creating a new Span and
|
||||
// determines whether that Span is sampled. The randomized TraceID is supplied
|
||||
// to allow deterministic sampling decisions to be made across different nodes.
|
||||
// For example,
|
||||
//
|
||||
// func(traceID uint64) { return traceID % 64 == 0 }
|
||||
//
|
||||
// samples every 64th trace on average.
|
||||
ShouldSample func(traceID uint64) bool
|
||||
// TrimUnsampledSpans turns potentially expensive operations on unsampled
|
||||
// Spans into no-ops. More precisely, tags and log events are silently
|
||||
// discarded. If NewSpanEventListener is set, the callbacks will still fire.
|
||||
TrimUnsampledSpans bool
|
||||
// Recorder receives Spans which have been finished.
|
||||
Recorder SpanRecorder
|
||||
// NewSpanEventListener can be used to enhance the tracer by effectively
|
||||
// attaching external code to trace events. See NetTraceIntegrator for a
|
||||
// practical example, and event.go for the list of possible events.
|
||||
NewSpanEventListener func() func(SpanEvent)
|
||||
// DropAllLogs turns log events on all Spans into no-ops.
|
||||
// If NewSpanEventListener is set, the callbacks will still fire.
|
||||
DropAllLogs bool
|
||||
// MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
|
||||
// value). If a span has more logs than this value, logs are dropped as
|
||||
// necessary (and replaced with a log describing how many were dropped).
|
||||
//
|
||||
// About half of the MaxLogPerSpan logs kept are the oldest logs, and about
|
||||
// half are the newest logs.
|
||||
//
|
||||
// If NewSpanEventListener is set, the callbacks will still fire for all log
|
||||
// events. This value is ignored if DropAllLogs is true.
|
||||
MaxLogsPerSpan int
|
||||
// DebugAssertSingleGoroutine internally records the ID of the goroutine
|
||||
// creating each Span and verifies that no operation is carried out on
|
||||
// it on a different goroutine.
|
||||
// Provided strictly for development purposes.
|
||||
// Passing Spans between goroutine without proper synchronization often
|
||||
// results in use-after-Finish() errors. For a simple example, consider the
|
||||
// following pseudocode:
|
||||
//
|
||||
// func (s *Server) Handle(req http.Request) error {
|
||||
// sp := s.StartSpan("server")
|
||||
// defer sp.Finish()
|
||||
// wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req)
|
||||
// select {
|
||||
// case resp := <-wait:
|
||||
// return resp.Error
|
||||
// case <-time.After(10*time.Second):
|
||||
// sp.LogEvent("timed out waiting for processing")
|
||||
// return ErrTimedOut
|
||||
// }
|
||||
// }
|
||||
//
|
||||
// This looks reasonable at first, but a request which spends more than ten
|
||||
// seconds in the queue is abandoned by the main goroutine and its trace
|
||||
// finished, leading to use-after-finish when the request is finally
|
||||
// processed. Note also that even joining on to a finished Span via
|
||||
// StartSpanWithOptions constitutes an illegal operation.
|
||||
//
|
||||
// Code bases which do not require (or decide they do not want) Spans to
|
||||
// be passed across goroutine boundaries can run with this flag enabled in
|
||||
// tests to increase their chances of spotting wrong-doers.
|
||||
DebugAssertSingleGoroutine bool
|
||||
// DebugAssertUseAfterFinish is provided strictly for development purposes.
|
||||
// When set, it attempts to exacerbate issues emanating from use of Spans
|
||||
// after calling Finish by running additional assertions.
|
||||
DebugAssertUseAfterFinish bool
|
||||
// EnableSpanPool enables the use of a pool, so that the tracer reuses spans
|
||||
// after Finish has been called on it. Adds a slight performance gain as it
|
||||
// reduces allocations. However, if you have any use-after-finish race
|
||||
// conditions the code may panic.
|
||||
EnableSpanPool bool
|
||||
}
|
||||
|
||||
// DefaultOptions returns an Options object with a 1 in 64 sampling rate and
|
||||
// all options disabled. A Recorder needs to be set manually before using the
|
||||
// returned object with a Tracer.
|
||||
func DefaultOptions() Options {
|
||||
return Options{
|
||||
ShouldSample: func(traceID uint64) bool { return traceID%64 == 0 },
|
||||
MaxLogsPerSpan: 100,
|
||||
}
|
||||
}
|
||||
|
||||
// NewWithOptions creates a customized Tracer.
|
||||
func NewWithOptions(opts Options) opentracing.Tracer {
|
||||
rval := &tracerImpl{options: opts}
|
||||
rval.textPropagator = &textMapPropagator{rval}
|
||||
rval.binaryPropagator = &binaryPropagator{rval}
|
||||
rval.accessorPropagator = &accessorPropagator{rval}
|
||||
return rval
|
||||
}
|
||||
|
||||
// New creates and returns a standard Tracer which defers completed Spans to
|
||||
// `recorder`.
|
||||
// Spans created by this Tracer support the ext.SamplingPriority tag: Setting
|
||||
// ext.SamplingPriority causes the Span to be Sampled from that point on.
|
||||
func New(recorder SpanRecorder) opentracing.Tracer {
|
||||
opts := DefaultOptions()
|
||||
opts.Recorder = recorder
|
||||
return NewWithOptions(opts)
|
||||
}
|
||||
|
||||
// Implements the `Tracer` interface.
|
||||
type tracerImpl struct {
|
||||
options Options
|
||||
textPropagator *textMapPropagator
|
||||
binaryPropagator *binaryPropagator
|
||||
accessorPropagator *accessorPropagator
|
||||
}
|
||||
|
||||
func (t *tracerImpl) StartSpan(
|
||||
operationName string,
|
||||
opts ...opentracing.StartSpanOption,
|
||||
) opentracing.Span {
|
||||
sso := opentracing.StartSpanOptions{}
|
||||
for _, o := range opts {
|
||||
o.Apply(&sso)
|
||||
}
|
||||
return t.StartSpanWithOptions(operationName, sso)
|
||||
}
|
||||
|
||||
func (t *tracerImpl) getSpan() *spanImpl {
|
||||
if t.options.EnableSpanPool {
|
||||
sp := spanPool.Get().(*spanImpl)
|
||||
sp.reset()
|
||||
return sp
|
||||
}
|
||||
return &spanImpl{}
|
||||
}
|
||||
|
||||
func (t *tracerImpl) StartSpanWithOptions(
|
||||
operationName string,
|
||||
opts opentracing.StartSpanOptions,
|
||||
) opentracing.Span {
|
||||
// Start time.
|
||||
startTime := opts.StartTime
|
||||
if startTime.IsZero() {
|
||||
startTime = time.Now()
|
||||
}
|
||||
|
||||
// Tags.
|
||||
tags := opts.Tags
|
||||
|
||||
// Build the new span. This is the only allocation: We'll return this as
|
||||
// an opentracing.Span.
|
||||
sp := t.getSpan()
|
||||
// Look for a parent in the list of References.
|
||||
//
|
||||
// TODO: would be nice if basictracer did something with all
|
||||
// References, not just the first one.
|
||||
ReferencesLoop:
|
||||
for _, ref := range opts.References {
|
||||
switch ref.Type {
|
||||
case opentracing.ChildOfRef,
|
||||
opentracing.FollowsFromRef:
|
||||
|
||||
refCtx := ref.ReferencedContext.(SpanContext)
|
||||
sp.raw.Context.TraceID = refCtx.TraceID
|
||||
sp.raw.Context.SpanID = randomID()
|
||||
sp.raw.Context.Sampled = refCtx.Sampled
|
||||
sp.raw.ParentSpanID = refCtx.SpanID
|
||||
|
||||
if l := len(refCtx.Baggage); l > 0 {
|
||||
sp.raw.Context.Baggage = make(map[string]string, l)
|
||||
for k, v := range refCtx.Baggage {
|
||||
sp.raw.Context.Baggage[k] = v
|
||||
}
|
||||
}
|
||||
break ReferencesLoop
|
||||
}
|
||||
}
|
||||
if sp.raw.Context.TraceID == 0 {
|
||||
// No parent Span found; allocate new trace and span ids and determine
|
||||
// the Sampled status.
|
||||
sp.raw.Context.TraceID, sp.raw.Context.SpanID = randomID2()
|
||||
sp.raw.Context.Sampled = t.options.ShouldSample(sp.raw.Context.TraceID)
|
||||
}
|
||||
|
||||
return t.startSpanInternal(
|
||||
sp,
|
||||
operationName,
|
||||
startTime,
|
||||
tags,
|
||||
)
|
||||
}
|
||||
|
||||
func (t *tracerImpl) startSpanInternal(
|
||||
sp *spanImpl,
|
||||
operationName string,
|
||||
startTime time.Time,
|
||||
tags opentracing.Tags,
|
||||
) opentracing.Span {
|
||||
sp.tracer = t
|
||||
if t.options.NewSpanEventListener != nil {
|
||||
sp.event = t.options.NewSpanEventListener()
|
||||
}
|
||||
sp.raw.Operation = operationName
|
||||
sp.raw.Start = startTime
|
||||
sp.raw.Duration = -1
|
||||
sp.raw.Tags = tags
|
||||
if t.options.DebugAssertSingleGoroutine {
|
||||
sp.SetTag(debugGoroutineIDTag, curGoroutineID())
|
||||
}
|
||||
defer sp.onCreate(operationName)
|
||||
return sp
|
||||
}
|
||||
|
||||
type delegatorType struct{}
|
||||
|
||||
// Delegator is the format to use for DelegatingCarrier.
|
||||
var Delegator delegatorType
|
||||
|
||||
func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error {
|
||||
switch format {
|
||||
case opentracing.TextMap, opentracing.HTTPHeaders:
|
||||
return t.textPropagator.Inject(sc, carrier)
|
||||
case opentracing.Binary:
|
||||
return t.binaryPropagator.Inject(sc, carrier)
|
||||
}
|
||||
if _, ok := format.(delegatorType); ok {
|
||||
return t.accessorPropagator.Inject(sc, carrier)
|
||||
}
|
||||
return opentracing.ErrUnsupportedFormat
|
||||
}
|
||||
|
||||
func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
|
||||
switch format {
|
||||
case opentracing.TextMap, opentracing.HTTPHeaders:
|
||||
return t.textPropagator.Extract(carrier)
|
||||
case opentracing.Binary:
|
||||
return t.binaryPropagator.Extract(carrier)
|
||||
}
|
||||
if _, ok := format.(delegatorType); ok {
|
||||
return t.accessorPropagator.Extract(carrier)
|
||||
}
|
||||
return nil, opentracing.ErrUnsupportedFormat
|
||||
}
|
||||
|
||||
func (t *tracerImpl) Options() Options {
|
||||
return t.options
|
||||
}
|
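The doc comment on New notes that spans support the ext.SamplingPriority tag; SetTag in span.go flips Context.Sampled when that tag is set with a uint16 value. A stand-alone sketch (illustrative, not part of the vendored code) forcing an otherwise unsampled span to be sampled:

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "fmt"

    basictracer "github.com/opentracing/basictracer-go"
    "github.com/opentracing/opentracing-go/ext"
)

func main() {
    recorder := basictracer.NewInMemoryRecorder()

    opts := basictracer.DefaultOptions()
    opts.ShouldSample = func(uint64) bool { return false } // never sample by default
    opts.Recorder = recorder
    tracer := basictracer.NewWithOptions(opts)

    span := tracer.StartSpan("rare-but-important")
    ext.SamplingPriority.Set(span, 1) // sets the tag as a uint16, flipping Sampled to true
    span.Finish()

    fmt.Println("sampled spans:", len(recorder.GetSampledSpans())) // 1
}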
25
vendor/github.com/opentracing/basictracer-go/util.go
generated
vendored
Normal file
@ -0,0 +1,25 @@
package basictracer

import (
    "math/rand"
    "sync"
    "time"
)

var (
    seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
    // The golang rand generators are *not* intrinsically thread-safe.
    seededIDLock sync.Mutex
)

func randomID() uint64 {
    seededIDLock.Lock()
    defer seededIDLock.Unlock()
    return uint64(seededIDGen.Int63())
}

func randomID2() (uint64, uint64) {
    seededIDLock.Lock()
    defer seededIDLock.Unlock()
    return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63())
}
40
vendor/github.com/opentracing/basictracer-go/wire/carrier.go
generated
vendored
Normal file
@ -0,0 +1,40 @@
package wire

// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the
// underlying datastructure. The reason for implementing DelegatingCarrier
// is to allow for end users to serialize the underlying protocol buffers using
// jsonpb or any other serialization forms they want.
type ProtobufCarrier TracerState

// SetState sets the tracer state.
func (p *ProtobufCarrier) SetState(traceID, spanID uint64, sampled bool) {
    p.TraceId = traceID
    p.SpanId = spanID
    p.Sampled = sampled
}

// State returns the tracer state.
func (p *ProtobufCarrier) State() (traceID, spanID uint64, sampled bool) {
    traceID = p.TraceId
    spanID = p.SpanId
    sampled = p.Sampled
    return traceID, spanID, sampled
}

// SetBaggageItem sets a baggage item.
func (p *ProtobufCarrier) SetBaggageItem(key, value string) {
    if p.BaggageItems == nil {
        p.BaggageItems = map[string]string{key: value}
        return
    }

    p.BaggageItems[key] = value
}

// GetBaggage iterates over each baggage item and executes the callback with
// the key:value pair.
func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) {
    for k, v := range p.BaggageItems {
        f(k, v)
    }
}
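ProtobufCarrier is the concrete DelegatingCarrier used with the basictracer.Delegator format handled by the accessor propagator earlier in this commit. The sketch below is illustrative and not part of the vendored code; it round-trips a span context through the carrier, which a caller could then serialize with jsonpb or anything else. The baggage key and value are made up.

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "fmt"

    basictracer "github.com/opentracing/basictracer-go"
    "github.com/opentracing/basictracer-go/wire"
)

func main() {
    tracer := basictracer.New(basictracer.NewInMemoryRecorder())

    span := tracer.StartSpan("delegated")
    span.SetBaggageItem("tenant", "acme")

    carrier := &wire.ProtobufCarrier{}
    if err := tracer.Inject(span.Context(), basictracer.Delegator, carrier); err != nil {
        panic(err)
    }
    // The carrier is now a plain protobuf message (TracerState) that the caller
    // may serialize however it likes before shipping it across process boundaries.
    fmt.Printf("trace=%x span=%x sampled=%v baggage=%v\n",
        carrier.TraceId, carrier.SpanId, carrier.Sampled, carrier.BaggageItems)

    if _, err := tracer.Extract(basictracer.Delegator, carrier); err != nil {
        panic(err)
    }
    span.Finish()
}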
6
vendor/github.com/opentracing/basictracer-go/wire/gen.go
generated
vendored
Normal file
@ -0,0 +1,6 @@
package wire

//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/opentracing/basictracer-go/wire wire.proto

// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the
// gogofaster generator binary.
508
vendor/github.com/opentracing/basictracer-go/wire/wire.pb.go
generated
vendored
Normal file
@ -0,0 +1,508 @@
|
||||
// Code generated by protoc-gen-gogo.
|
||||
// source: wire.proto
|
||||
// DO NOT EDIT!
|
||||
|
||||
/*
|
||||
Package wire is a generated protocol buffer package.
|
||||
|
||||
It is generated from these files:
|
||||
wire.proto
|
||||
|
||||
It has these top-level messages:
|
||||
TracerState
|
||||
*/
|
||||
package wire
|
||||
|
||||
import proto "github.com/gogo/protobuf/proto"
|
||||
import fmt "fmt"
|
||||
import math "math"
|
||||
|
||||
import io "io"
|
||||
|
||||
// Reference imports to suppress errors if they are not otherwise used.
|
||||
var _ = proto.Marshal
|
||||
var _ = fmt.Errorf
|
||||
var _ = math.Inf
|
||||
|
||||
// This is a compile-time assertion to ensure that this generated file
|
||||
// is compatible with the proto package it is being compiled against.
|
||||
const _ = proto.GoGoProtoPackageIsVersion1
|
||||
|
||||
type TracerState struct {
|
||||
TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
|
||||
SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
|
||||
Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"`
|
||||
BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
|
||||
}
|
||||
|
||||
func (m *TracerState) Reset() { *m = TracerState{} }
|
||||
func (m *TracerState) String() string { return proto.CompactTextString(m) }
|
||||
func (*TracerState) ProtoMessage() {}
|
||||
func (*TracerState) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} }
|
||||
|
||||
func (m *TracerState) GetBaggageItems() map[string]string {
|
||||
if m != nil {
|
||||
return m.BaggageItems
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func init() {
|
||||
proto.RegisterType((*TracerState)(nil), "basictracer_go.wire.TracerState")
|
||||
}
|
||||
func (m *TracerState) Marshal() (data []byte, err error) {
|
||||
size := m.Size()
|
||||
data = make([]byte, size)
|
||||
n, err := m.MarshalTo(data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return data[:n], nil
|
||||
}
|
||||
|
||||
func (m *TracerState) MarshalTo(data []byte) (int, error) {
|
||||
var i int
|
||||
_ = i
|
||||
var l int
|
||||
_ = l
|
||||
if m.TraceId != 0 {
|
||||
data[i] = 0x9
|
||||
i++
|
||||
i = encodeFixed64Wire(data, i, uint64(m.TraceId))
|
||||
}
|
||||
if m.SpanId != 0 {
|
||||
data[i] = 0x11
|
||||
i++
|
||||
i = encodeFixed64Wire(data, i, uint64(m.SpanId))
|
||||
}
|
||||
if m.Sampled {
|
||||
data[i] = 0x18
|
||||
i++
|
||||
if m.Sampled {
|
||||
data[i] = 1
|
||||
} else {
|
||||
data[i] = 0
|
||||
}
|
||||
i++
|
||||
}
|
||||
if len(m.BaggageItems) > 0 {
|
||||
for k, _ := range m.BaggageItems {
|
||||
data[i] = 0x22
|
||||
i++
|
||||
v := m.BaggageItems[k]
|
||||
mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
|
||||
i = encodeVarintWire(data, i, uint64(mapSize))
|
||||
data[i] = 0xa
|
||||
i++
|
||||
i = encodeVarintWire(data, i, uint64(len(k)))
|
||||
i += copy(data[i:], k)
|
||||
data[i] = 0x12
|
||||
i++
|
||||
i = encodeVarintWire(data, i, uint64(len(v)))
|
||||
i += copy(data[i:], v)
|
||||
}
|
||||
}
|
||||
return i, nil
|
||||
}
|
||||
|
||||
func encodeFixed64Wire(data []byte, offset int, v uint64) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
data[offset+4] = uint8(v >> 32)
|
||||
data[offset+5] = uint8(v >> 40)
|
||||
data[offset+6] = uint8(v >> 48)
|
||||
data[offset+7] = uint8(v >> 56)
|
||||
return offset + 8
|
||||
}
|
||||
func encodeFixed32Wire(data []byte, offset int, v uint32) int {
|
||||
data[offset] = uint8(v)
|
||||
data[offset+1] = uint8(v >> 8)
|
||||
data[offset+2] = uint8(v >> 16)
|
||||
data[offset+3] = uint8(v >> 24)
|
||||
return offset + 4
|
||||
}
|
||||
func encodeVarintWire(data []byte, offset int, v uint64) int {
|
||||
for v >= 1<<7 {
|
||||
data[offset] = uint8(v&0x7f | 0x80)
|
||||
v >>= 7
|
||||
offset++
|
||||
}
|
||||
data[offset] = uint8(v)
|
||||
return offset + 1
|
||||
}
|
||||
func (m *TracerState) Size() (n int) {
|
||||
var l int
|
||||
_ = l
|
||||
if m.TraceId != 0 {
|
||||
n += 9
|
||||
}
|
||||
if m.SpanId != 0 {
|
||||
n += 9
|
||||
}
|
||||
if m.Sampled {
|
||||
n += 2
|
||||
}
|
||||
if len(m.BaggageItems) > 0 {
|
||||
for k, v := range m.BaggageItems {
|
||||
_ = k
|
||||
_ = v
|
||||
mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
|
||||
n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize))
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
|
||||
func sovWire(x uint64) (n int) {
|
||||
for {
|
||||
n++
|
||||
x >>= 7
|
||||
if x == 0 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return n
|
||||
}
|
||||
func sozWire(x uint64) (n int) {
|
||||
return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63))))
|
||||
}
|
||||
func (m *TracerState) Unmarshal(data []byte) error {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
preIndex := iNdEx
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
fieldNum := int32(wire >> 3)
|
||||
wireType := int(wire & 0x7)
|
||||
if wireType == 4 {
|
||||
return fmt.Errorf("proto: TracerState: wiretype end group for non-group")
|
||||
}
|
||||
if fieldNum <= 0 {
|
||||
return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire)
|
||||
}
|
||||
switch fieldNum {
|
||||
case 1:
|
||||
if wireType != 1 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
|
||||
}
|
||||
m.TraceId = 0
|
||||
if (iNdEx + 8) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += 8
|
||||
m.TraceId = uint64(data[iNdEx-8])
|
||||
m.TraceId |= uint64(data[iNdEx-7]) << 8
|
||||
m.TraceId |= uint64(data[iNdEx-6]) << 16
|
||||
m.TraceId |= uint64(data[iNdEx-5]) << 24
|
||||
m.TraceId |= uint64(data[iNdEx-4]) << 32
|
||||
m.TraceId |= uint64(data[iNdEx-3]) << 40
|
||||
m.TraceId |= uint64(data[iNdEx-2]) << 48
|
||||
m.TraceId |= uint64(data[iNdEx-1]) << 56
|
||||
case 2:
|
||||
if wireType != 1 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
|
||||
}
|
||||
m.SpanId = 0
|
||||
if (iNdEx + 8) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += 8
|
||||
m.SpanId = uint64(data[iNdEx-8])
|
||||
m.SpanId |= uint64(data[iNdEx-7]) << 8
|
||||
m.SpanId |= uint64(data[iNdEx-6]) << 16
|
||||
m.SpanId |= uint64(data[iNdEx-5]) << 24
|
||||
m.SpanId |= uint64(data[iNdEx-4]) << 32
|
||||
m.SpanId |= uint64(data[iNdEx-3]) << 40
|
||||
m.SpanId |= uint64(data[iNdEx-2]) << 48
|
||||
m.SpanId |= uint64(data[iNdEx-1]) << 56
|
||||
case 3:
|
||||
if wireType != 0 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType)
|
||||
}
|
||||
var v int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
v |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
m.Sampled = bool(v != 0)
|
||||
case 4:
|
||||
if wireType != 2 {
|
||||
return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType)
|
||||
}
|
||||
var msglen int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
msglen |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
if msglen < 0 {
|
||||
return ErrInvalidLengthWire
|
||||
}
|
||||
postIndex := iNdEx + msglen
|
||||
if postIndex > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
var keykey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
keykey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapkey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapkey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapkey := int(stringLenmapkey)
|
||||
if intStringLenmapkey < 0 {
|
||||
return ErrInvalidLengthWire
|
||||
}
|
||||
postStringIndexmapkey := iNdEx + intStringLenmapkey
|
||||
if postStringIndexmapkey > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapkey := string(data[iNdEx:postStringIndexmapkey])
|
||||
iNdEx = postStringIndexmapkey
|
||||
var valuekey uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
valuekey |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
var stringLenmapvalue uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
intStringLenmapvalue := int(stringLenmapvalue)
|
||||
if intStringLenmapvalue < 0 {
|
||||
return ErrInvalidLengthWire
|
||||
}
|
||||
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
|
||||
if postStringIndexmapvalue > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
mapvalue := string(data[iNdEx:postStringIndexmapvalue])
|
||||
iNdEx = postStringIndexmapvalue
|
||||
if m.BaggageItems == nil {
|
||||
m.BaggageItems = make(map[string]string)
|
||||
}
|
||||
m.BaggageItems[mapkey] = mapvalue
|
||||
iNdEx = postIndex
|
||||
default:
|
||||
iNdEx = preIndex
|
||||
skippy, err := skipWire(data[iNdEx:])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if skippy < 0 {
|
||||
return ErrInvalidLengthWire
|
||||
}
|
||||
if (iNdEx + skippy) > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx += skippy
|
||||
}
|
||||
}
|
||||
|
||||
if iNdEx > l {
|
||||
return io.ErrUnexpectedEOF
|
||||
}
|
||||
return nil
|
||||
}
|
||||
func skipWire(data []byte) (n int, err error) {
|
||||
l := len(data)
|
||||
iNdEx := 0
|
||||
for iNdEx < l {
|
||||
var wire uint64
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
wire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
wireType := int(wire & 0x7)
|
||||
switch wireType {
|
||||
case 0:
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
iNdEx++
|
||||
if data[iNdEx-1] < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 1:
|
||||
iNdEx += 8
|
||||
return iNdEx, nil
|
||||
case 2:
|
||||
var length int
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
length |= (int(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
iNdEx += length
|
||||
if length < 0 {
|
||||
return 0, ErrInvalidLengthWire
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 3:
|
||||
for {
|
||||
var innerWire uint64
|
||||
var start int = iNdEx
|
||||
for shift := uint(0); ; shift += 7 {
|
||||
if shift >= 64 {
|
||||
return 0, ErrIntOverflowWire
|
||||
}
|
||||
if iNdEx >= l {
|
||||
return 0, io.ErrUnexpectedEOF
|
||||
}
|
||||
b := data[iNdEx]
|
||||
iNdEx++
|
||||
innerWire |= (uint64(b) & 0x7F) << shift
|
||||
if b < 0x80 {
|
||||
break
|
||||
}
|
||||
}
|
||||
innerWireType := int(innerWire & 0x7)
|
||||
if innerWireType == 4 {
|
||||
break
|
||||
}
|
||||
next, err := skipWire(data[start:])
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
iNdEx = start + next
|
||||
}
|
||||
return iNdEx, nil
|
||||
case 4:
|
||||
return iNdEx, nil
|
||||
case 5:
|
||||
iNdEx += 4
|
||||
return iNdEx, nil
|
||||
default:
|
||||
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
|
||||
}
|
||||
}
|
||||
panic("unreachable")
|
||||
}
|
||||
|
||||
var (
|
||||
ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling")
|
||||
ErrIntOverflowWire = fmt.Errorf("proto: integer overflow")
|
||||
)
|
||||
|
||||
var fileDescriptorWire = []byte{
|
||||
// 234 bytes of a gzipped FileDescriptorProto
|
||||
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a,
|
||||
0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0x4e, 0x4a, 0x2c, 0xce, 0x4c, 0x2e, 0x29, 0x4a,
|
||||
0x4c, 0x4e, 0x2d, 0x8a, 0x4f, 0xcf, 0xd7, 0x03, 0x49, 0x29, 0x7d, 0x65, 0xe4, 0xe2, 0x0e, 0x01,
|
||||
0x0b, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x49, 0x72, 0x71, 0x80, 0x55, 0xc4, 0x67, 0xa6, 0x48,
|
||||
0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x05, 0xb1, 0x83, 0xf9, 0x9e, 0x29, 0x42, 0xe2, 0x5c, 0xec, 0xc5,
|
||||
0x05, 0x89, 0x79, 0x20, 0x19, 0x26, 0xb0, 0x0c, 0x1b, 0x88, 0x0b, 0x94, 0x90, 0x00, 0x4a, 0x24,
|
||||
0xe6, 0x16, 0xe4, 0xa4, 0xa6, 0x48, 0x30, 0x03, 0x25, 0x38, 0x82, 0x60, 0x5c, 0xa1, 0x70, 0x2e,
|
||||
0xde, 0xa4, 0xc4, 0xf4, 0xf4, 0xc4, 0x74, 0xa0, 0x79, 0x25, 0xa9, 0xb9, 0xc5, 0x12, 0x2c, 0x0a,
|
||||
0xcc, 0x1a, 0xdc, 0x46, 0x46, 0x7a, 0x58, 0x9c, 0xa2, 0x87, 0xe4, 0x0c, 0x3d, 0x27, 0x88, 0x2e,
|
||||
0x4f, 0x90, 0x26, 0xd7, 0xbc, 0x92, 0xa2, 0xca, 0x20, 0x9e, 0x24, 0x24, 0x21, 0x29, 0x7b, 0x2e,
|
||||
0x41, 0x0c, 0x25, 0x42, 0x02, 0x5c, 0xcc, 0xd9, 0xa9, 0x95, 0x60, 0x67, 0x73, 0x06, 0x81, 0x98,
|
||||
0x42, 0x22, 0x5c, 0xac, 0x65, 0x89, 0x39, 0xa5, 0xa9, 0x60, 0x07, 0x73, 0x06, 0x41, 0x38, 0x56,
|
||||
0x4c, 0x16, 0x8c, 0x4e, 0x62, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x00, 0xe2, 0x07, 0x40, 0x3c, 0xe1,
|
||||
0xb1, 0x1c, 0x43, 0x14, 0x0b, 0xc8, 0x11, 0x49, 0x6c, 0xe0, 0xb0, 0x32, 0x06, 0x04, 0x00, 0x00,
|
||||
0xff, 0xff, 0x0a, 0x20, 0x89, 0x38, 0x39, 0x01, 0x00, 0x00,
|
||||
}
|
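Since the generated TracerState implements the gogo proto Marshaler and Unmarshaler interfaces above, it can also be round-tripped directly with the proto runtime. A minimal sketch, illustrative and not part of the vendored code; the field values are made up:

// Illustrative sketch only; not part of the vendored file.
package main

import (
    "fmt"

    "github.com/gogo/protobuf/proto"
    "github.com/opentracing/basictracer-go/wire"
)

func main() {
    in := wire.TracerState{
        TraceId:      0x1234,
        SpanId:       0x5678,
        Sampled:      true,
        BaggageItems: map[string]string{"tenant": "acme"},
    }

    data, err := proto.Marshal(&in)
    if err != nil {
        panic(err)
    }

    var out wire.TracerState
    if err := proto.Unmarshal(data, &out); err != nil {
        panic(err)
    }
    fmt.Printf("trace=%x span=%x sampled=%v baggage=%v\n",
        out.TraceId, out.SpanId, out.Sampled, out.BaggageItems)
}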