Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feature: allow object manifest to define custom DSN #72

Merged
merged 9 commits into from
Dec 13, 2023
70 changes: 58 additions & 12 deletions crons.go
Original file line number Diff line number Diff line change
Expand Up @@ -78,6 +78,14 @@ func startCronsInformers(ctx context.Context, namespace string) error {
// or if the job exited
func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerType EventHandlerType) error {

hub := sentry.GetHubFromContext(ctx)
if hub == nil {
return errors.New("cannot get hub from context")
}

// to avoid concurrency issue
hub = hub.Clone()

// Try to find the cronJob name that owns the job
// in order to get the crons monitor data
if len(job.OwnerReferences) == 0 {
Expand All @@ -92,16 +100,44 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
return errors.New("cannot find cronJob data")
}

// The job just begun so check in to start
if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
// Add the job to the cronJob informer data
checkinJobStarting(ctx, job, cronsMonitorData)
} else if job.Status.Active > 0 {
return nil
} else if job.Status.Failed > 0 || job.Status.Succeeded > 0 {
checkinJobEnding(ctx, job, cronsMonitorData)
return nil // finished
}
hub.WithScope(func(scope *sentry.Scope) {
altDsn, err := searchDsn(ctx, job.ObjectMeta)
if err != nil {
return
}

// if we did find an alternative DSN
if altDsn != "" {
// attempt to retrieve the corresponding client
client, _ := dsnData.GetClient(altDsn)

if client == nil {
newOptions := hub.Client().Options()
newOptions.Dsn = altDsn
client, err = dsnData.AddClient(newOptions)
if err != nil {
return
}
}

// bind the alternative client to the top layer
hub.BindClient(client)
}

// pass clone hub down with context
ctx = sentry.SetHubOnContext(ctx, hub)
// The job just begun so check in to start
if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
// Add the job to the cronJob informer data
checkinJobStarting(ctx, job, cronsMonitorData)
} else if job.Status.Active > 0 {
return
} else if job.Status.Failed > 0 || job.Status.Succeeded > 0 {
checkinJobEnding(ctx, job, cronsMonitorData)
return // finished
}
})

return nil
}

Expand All @@ -110,6 +146,11 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData

logger := zerolog.Ctx(ctx)

hub := sentry.GetHubFromContext(ctx)
if hub == nil {
return errors.New("cannot get hub from context")
}

// Check if job already added to jobData slice
_, ok := cronsMonitorData.JobDatas[job.Name]
if ok {
Expand All @@ -118,7 +159,7 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData
logger.Debug().Msgf("Checking in at start of job: %s\n", job.Name)

// All containers running in the pod
checkinId := sentry.CaptureCheckIn(
checkinId := hub.CaptureCheckIn(
&sentry.CheckIn{
MonitorSlug: cronsMonitorData.MonitorSlug,
Status: sentry.CheckInStatusInProgress,
Expand All @@ -133,6 +174,11 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData
// Sends the checkin event to sentry crons for when a job ends
func checkinJobEnding(ctx context.Context, job *batchv1.Job, cronsMonitorData *CronsMonitorData) error {

hub := sentry.GetHubFromContext(ctx)
if hub == nil {
return errors.New("cannot get hub from context")
}

logger := zerolog.Ctx(ctx)

// Check desired number of pods have succeeded
Expand All @@ -157,7 +203,7 @@ func checkinJobEnding(ctx context.Context, job *batchv1.Job, cronsMonitorData *C
}

logger.Trace().Msgf("checking in at end of job: %s\n", job.Name)
sentry.CaptureCheckIn(
hub.CaptureCheckIn(
&sentry.CheckIn{
ID: jobData.getCheckinId(),
MonitorSlug: cronsMonitorData.MonitorSlug,
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/cronjob-basic-error.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@ metadata:
name: cronjob-basic-error
labels:
type: test-pod
"annotations": {
"dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
}
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved
spec:
schedule: "* * * * *"
jobTemplate:
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/cronjob-basic-success.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@ metadata:
name: cronjob-basic-success
labels:
type: test-pod
"annotations": {
"dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
}
spec:
schedule: "* * * * *"
jobTemplate:
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/cronjob-late-maybe-error.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@ metadata:
name: cronjob-late-maybe-error
labels:
type: test-pod
"annotations": {
"dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
}
spec:
schedule: "* * * * *"
jobTemplate:
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/cronjob-late-success.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,9 @@ metadata:
name: cronjob-late-success
labels:
type: test-pod
"annotations": {
"dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
}
spec:
schedule: "* * * * *"
jobTemplate:
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/deployment-create-container-error.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,9 @@ metadata:
labels:
run: deployment-create-container-error
type: test-pod
"annotations": {
"dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
}
spec:
replicas: 2
selector:
Expand Down
3 changes: 3 additions & 0 deletions k8s/errors/pod-outofmemory.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,9 @@ metadata:
run: pod-outofmemory
type: test-pod
name: pod-outofmemory
"annotations": {
"dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
}
spec:
containers:
- image: python:3.10-alpine
Expand Down
6 changes: 2 additions & 4 deletions mocks_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -8,17 +8,15 @@ import (
)

type TransportMock struct {
mu sync.Mutex
events []*sentry.Event
lastEvent *sentry.Event
mu sync.Mutex
events []*sentry.Event
}

func (t *TransportMock) Configure(options sentry.ClientOptions) {}
func (t *TransportMock) SendEvent(event *sentry.Event) {
t.mu.Lock()
defer t.mu.Unlock()
t.events = append(t.events, event)
t.lastEvent = event
}
func (t *TransportMock) Flush(timeout time.Duration) bool {
return true
Expand Down
163 changes: 163 additions & 0 deletions sentry_dsn_data.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
package main

import (
"context"
"errors"
"sync"

"github.com/getsentry/sentry-go"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

var DSN = "dsn"
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved

// map from Sentry DSN to Client
type DsnData struct {
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved
mutex sync.RWMutex
clientMap map[string]*sentry.Client
}

// NewDsnData constructs an empty DsnData ready for concurrent use.
// The zero-value RWMutex needs no explicit initialization.
func NewDsnData() *DsnData {
	return &DsnData{
		clientMap: map[string]*sentry.Client{},
	}
}

// return client if added successfully
func (d *DsnData) AddClient(options sentry.ClientOptions) (*sentry.Client, error) {
d.mutex.Lock()
defer d.mutex.Unlock()

// check if we already encountered this dsn
existingClient, ok := d.clientMap[options.Dsn]
if ok {
return existingClient, errors.New("a client with the given dsn already exists")
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved
}

// create a new client for the dsn
newClient, err := sentry.NewClient(
sentry.ClientOptions{
Dsn: options.Dsn,
Debug: true,
AttachStacktrace: true,
},
)
if err != nil {
return nil, err
}
d.clientMap[options.Dsn] = newClient

return newClient, nil
}

// retrieve a client with given dsn
func (d *DsnData) GetClient(dsn string) (*sentry.Client, error) {
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved
d.mutex.RLock()
defer d.mutex.RUnlock()

// check if we have this dsn
existingClient, ok := d.clientMap[dsn]
if ok {
return existingClient, nil
} else {
return nil, errors.New("a client with given DSN does not exist")
}
}

// recursive function to find if there is a DSN annotation
func searchDsn(ctx context.Context, object metav1.ObjectMeta) (string, error) {

clientset, err := getClientsetFromContext(ctx)
if err != nil {
return "", err
}

dsn, ok := object.Annotations[DSN]
if ok {
return dsn, nil
}

if len(object.OwnerReferences) == 0 {
return "", nil
}

owningRef := object.OwnerReferences[0]
switch kind := owningRef.Kind; kind {
Jiahui-Zhang-20 marked this conversation as resolved.
Show resolved Hide resolved
case "Pod":
parentPod, err := clientset.CoreV1().Pods(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return searchDsn(ctx, parentPod.ObjectMeta)
case "ReplicaSet":
parentReplicaSet, err := clientset.AppsV1().ReplicaSets(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return searchDsn(ctx, parentReplicaSet.ObjectMeta)
case "Deployment":
parentDeployment, err := clientset.AppsV1().Deployments(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return searchDsn(ctx, parentDeployment.ObjectMeta)
case "Job":
parentJob, err := clientset.BatchV1().Jobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return searchDsn(ctx, parentJob.ObjectMeta)
case "CronJob":
parentCronjob, err := clientset.BatchV1().CronJobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
if err != nil {
return "", err
}
return searchDsn(ctx, parentCronjob.ObjectMeta)
default:
return "", errors.New("unsupported object kind encountered")
}
}

// findObjectMeta fetches the ObjectMeta of the named object of the given
// kind in the given namespace, using the clientset stored on the context.
// Supported kinds: Pod, ReplicaSet, Deployment, Job, CronJob; any other
// kind yields an error.
func findObjectMeta(ctx context.Context, kind string, namespace string, name string) (*metav1.ObjectMeta, error) {

	clientset, err := getClientsetFromContext(ctx)
	if err != nil {
		return nil, err
	}

	// Use the caller's ctx for every API call so cancellation and
	// deadlines propagate (previously context.Background() was used).
	switch kind {
	case "Pod":
		pod, err := clientset.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return &pod.ObjectMeta, nil
	case "ReplicaSet":
		replicaSet, err := clientset.AppsV1().ReplicaSets(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return &replicaSet.ObjectMeta, nil
	case "Deployment":
		deployment, err := clientset.AppsV1().Deployments(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return &deployment.ObjectMeta, nil
	case "Job":
		job, err := clientset.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return &job.ObjectMeta, nil
	case "CronJob":
		cronJob, err := clientset.BatchV1().CronJobs(namespace).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return nil, err
		}
		return &cronJob.ObjectMeta, nil
	default:
		return nil, errors.New("unsupported object kind encountered")
	}
}
Loading
Loading