If you are looking to start capturing deeper insights about the work your application is performing, OpenTelemetry is the way to go for capturing data points while using a consistent API surface.
Here is a quick snippet showing how you can set up App Engine/Cloud Run/Cloud Functions/GKE with OpenTelemetry on GCP. This leverages the X-Cloud-Trace-Context
header that comes in from every request from the GFE.
Source code for the propagationgcp package can be found here
https://github.com/amammay/propagationgcp
package main
import (
"cloud.google.com/go/compute/metadata"
"context"
"fmt"
texporter "github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace"
"github.com/amammay/propagationgcp" // ablility to parse X-Cloud-Trace-Context
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/exporters/stdout"
"go.opentelemetry.io/otel/propagation"
sdktrace "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
"log"
"net/http"
"os"
"time"
)
// main is the process entry point: it delegates all real work to run
// and maps any failure onto a non-zero exit code.
func main() {
	err := run()
	if err == nil {
		return
	}
	fmt.Fprintf(os.Stderr, "run(): %v", err)
	os.Exit(1)
}
// run wires up OpenTelemetry tracing, builds the HTTP handler chain, and
// starts the server. It returns any setup error, or the (always non-nil)
// error from ListenAndServe once the server stops.
func run() error {
	projectID := ""
	onGCE := metadata.OnGCE()
	if onGCE {
		// Infer our project id from the metadata server; only available
		// when actually running on GCP.
		id, err := metadata.ProjectID()
		if err != nil {
			return fmt.Errorf("metadata.ProjectID(): %v", err)
		}
		projectID = id
	}
	// Init OpenTelemetry, while allowing us to defer the teardown and
	// flushing of our exporter.
	tracerShutdown, err := initTracer(context.Background(), projectID, onGCE)
	if err != nil {
		return fmt.Errorf("initTracer(): %v", err)
	}
	defer tracerShutdown()
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) {
		ctx := request.Context()
		// Create our custom span of work. The returned context is only
		// needed when starting child spans; discard it here instead of
		// reassigning ctx to a value that is never read (staticcheck SA4006).
		_, span := trace.SpanFromContext(ctx).Tracer().Start(ctx, "hello-world")
		defer span.End()
		if _, err := writer.Write([]byte("hello world")); err != nil {
			// Response writes can fail (e.g. client went away); log instead
			// of silently dropping the error.
			log.Printf("writer.Write(): %v", err)
		}
	})
	// Add HTTP middleware for tracing: extracts incoming trace context and
	// wraps every request in a server span.
	otelWrapper := otelhttp.NewHandler(mux, "requesthandler")
	port := os.Getenv("PORT")
	if port == "" {
		port = "8080"
	}
	httpServer := http.Server{
		Addr:         ":" + port,
		Handler:      otelWrapper,
		ReadTimeout:  45 * time.Second,
		WriteTimeout: 45 * time.Second,
	}
	log.Printf("starting server on %q", httpServer.Addr)
	return httpServer.ListenAndServe()
}
// initTracer configures the global OpenTelemetry tracer provider and text-map
// propagators. When onGCE is true, spans are exported directly to Cloud Trace
// under projectID; otherwise they are pretty-printed to stdout for local
// development. The returned func flushes and shuts down the provider and
// should be deferred by the caller.
func initTracer(ctx context.Context, projectID string, onGCE bool) (func(), error) {
	var tpos []sdktrace.TracerProviderOption
	// If our code is running on GCP (Cloud Run/Functions/App Engine/GKE)
	// then we will export directly to Cloud Trace.
	if onGCE {
		exporter, err := texporter.NewExporter(texporter.WithProjectID(projectID))
		if err != nil {
			return nil, fmt.Errorf("texporter.NewExporter(): %v", err)
		}
		tpos = append(tpos, sdktrace.WithBatcher(exporter))
	} else {
		exporter, err := stdout.NewExporter(stdout.WithPrettyPrint())
		if err != nil {
			return nil, fmt.Errorf("stdout.NewExporter(): %v", err)
		}
		tpos = append(tpos, sdktrace.WithBatcher(exporter))
	}
	tp := sdktrace.NewTracerProvider(tpos...)
	otel.SetTracerProvider(tp)
	propagator := propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
		propagationgcp.HTTPFormat{}, // adding this propagator allows for extracting X-Cloud-Trace-Context
	)
	otel.SetTextMapPropagator(propagator)
	return func() {
		// Flush any buffered spans, then shut the provider down so the
		// batch processor's background goroutines stop. Calling only
		// ForceFlush would leave the provider running at process exit.
		if err := tp.ForceFlush(ctx); err != nil {
			log.Printf("tracerProvider.ForceFlush(): %v", err)
		}
		if err := tp.Shutdown(ctx); err != nil {
			log.Printf("tracerProvider.Shutdown(): %v", err)
		}
	}, nil
}
Top comments (0)