From 26a375910ca260d75e13ef781843d843c6ad683d Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Fri, 8 Dec 2023 19:46:30 -0800
Subject: [PATCH 1/9] allow custom dsn in manifest

---
 crons.go                                   |  65 ++++++++--
 k8s/errors/cronjob-basic-error.yaml        |   3 +
 k8s/errors/cronjob-basic-success.yaml      |   3 +
 k8s/errors/cronjob-late-maybe-error.yaml   |   3 +
 k8s/errors/cronjob-late-success.yaml       |   3 +
 .../deployment-create-container-error.yaml |   3 +
 k8s/errors/pod-outofmemory.yaml            |   3 +
 sentry_dsn_data.go                         | 120 ++++++++++++++++++
 watcher_events.go                          |  32 +++++
 watcher_pods.go                            |  29 +++++
 10 files changed, 252 insertions(+), 12 deletions(-)
 create mode 100644 sentry_dsn_data.go

diff --git a/crons.go b/crons.go
index 7bb8eed..8746960 100644
--- a/crons.go
+++ b/crons.go
@@ -78,6 +78,11 @@ func startCronsInformers(ctx context.Context, namespace string) error {
 // or if the job exited
 func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerType EventHandlerType) error {
 
+	hub := sentry.GetHubFromContext(ctx)
+	if hub == nil {
+		return errors.New("cannot get hub from context")
+	}
+
 	// Try to find the cronJob name that owns the job
 	// in order to get the crons monitor data
 	if len(job.OwnerReferences) == 0 {
@@ -92,16 +97,42 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 		return errors.New("cannot find cronJob data")
 	}
 
-	// The job just begun so check in to start
-	if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
-		// Add the job to the cronJob informer data
-		checkinJobStarting(ctx, job, cronsMonitorData)
-	} else if job.Status.Active > 0 {
-		return nil
-	} else if job.Status.Failed > 0 || job.Status.Succeeded > 0 {
-		checkinJobEnding(ctx, job, cronsMonitorData)
-		return nil // finished
-	}
+	hub.WithScope(func(scope *sentry.Scope) {
+		altDsn, err := searchDsn(ctx, job.ObjectMeta)
+		if err != nil {
+			return
+		}
+
+		fmt.Println("found alt dsn: " + altDsn)
+
+		// if we did find an alternative DSN
+		if altDsn != "" {
+			// attempt to retrieve the corresponding client
+			client, _ := dsnData.GetClient(altDsn)
+
+			if client == nil {
+				client, err = dsnData.AddClient(altDsn)
+				if err != nil {
+					return
+				}
+			}
+
+			// bind the alternative client to the top layer
+			hub.BindClient(client)
+		}
+
+		// The job just begun so check in to start
+		if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
+			// Add the job to the cronJob informer data
+			checkinJobStarting(ctx, job, cronsMonitorData)
+		} else if job.Status.Active > 0 {
+			return
+		} else if job.Status.Failed > 0 || job.Status.Succeeded > 0 {
+			checkinJobEnding(ctx, job, cronsMonitorData)
+			return // finished
+		}
+	})
+
 	return nil
 }
 
@@ -110,6 +141,11 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData
 
 	logger := zerolog.Ctx(ctx)
 
+	hub := sentry.GetHubFromContext(ctx)
+	if hub == nil {
+		return errors.New("cannot get hub from context")
+	}
+
 	// Check if job already added to jobData slice
 	_, ok := cronsMonitorData.JobDatas[job.Name]
 	if ok {
@@ -118,7 +154,7 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData
 	logger.Debug().Msgf("Checking in at start of job: %s\n", job.Name)
 
 	// All containers running in the pod
-	checkinId := sentry.CaptureCheckIn(
+	checkinId := hub.CaptureCheckIn(
 		&sentry.CheckIn{
 			MonitorSlug: cronsMonitorData.MonitorSlug,
 			Status:      sentry.CheckInStatusInProgress,
@@ -133,6 +169,11 @@ func checkinJobStarting(ctx context.Context, job *batchv1.Job, cronsMonitorData
 // Sends the checkin event to sentry crons for when a job ends
 func checkinJobEnding(ctx context.Context, job *batchv1.Job, cronsMonitorData *CronsMonitorData) error {
 
+	hub := sentry.GetHubFromContext(ctx)
+	if hub == nil {
+		return errors.New("cannot get hub from context")
+	}
+
 	logger := zerolog.Ctx(ctx)
 
 	// Check desired number of pods have succeeded
@@ -157,7 +198,7 @@ func checkinJobEnding(ctx context.Context, job *batchv1.Job, cronsMonitorData *C
 	}
 
 	logger.Trace().Msgf("checking in at end of job: %s\n", job.Name)
-	sentry.CaptureCheckIn(
+	hub.CaptureCheckIn(
 		&sentry.CheckIn{
 			ID:          jobData.getCheckinId(),
 			MonitorSlug: cronsMonitorData.MonitorSlug,
diff --git a/k8s/errors/cronjob-basic-error.yaml b/k8s/errors/cronjob-basic-error.yaml
index 22cc423..29b8b8e 100644
--- a/k8s/errors/cronjob-basic-error.yaml
+++ b/k8s/errors/cronjob-basic-error.yaml
@@ -4,6 +4,9 @@ metadata:
   name: cronjob-basic-error
   labels:
     type: test-pod
+  "annotations": {
+    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+  }
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-basic-success.yaml b/k8s/errors/cronjob-basic-success.yaml
index 2292483..e2fd243 100644
--- a/k8s/errors/cronjob-basic-success.yaml
+++ b/k8s/errors/cronjob-basic-success.yaml
@@ -4,6 +4,9 @@ metadata:
   name: cronjob-basic-success
   labels:
     type: test-pod
+  "annotations": {
+    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+  }
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-late-maybe-error.yaml b/k8s/errors/cronjob-late-maybe-error.yaml
index ccb9a22..7986892 100644
--- a/k8s/errors/cronjob-late-maybe-error.yaml
+++ b/k8s/errors/cronjob-late-maybe-error.yaml
@@ -4,6 +4,9 @@ metadata:
   name: cronjob-late-maybe-error
   labels:
     type: test-pod
+  "annotations": {
+    "dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
+  }
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-late-success.yaml b/k8s/errors/cronjob-late-success.yaml
index 19e07b9..15b2f72 100644
--- a/k8s/errors/cronjob-late-success.yaml
+++ b/k8s/errors/cronjob-late-success.yaml
@@ -4,6 +4,9 @@ metadata:
   name: cronjob-late-success
   labels:
     type: test-pod
+  "annotations": {
+    "dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
+  }
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/deployment-create-container-error.yaml b/k8s/errors/deployment-create-container-error.yaml
index 003fbb1..8e41b53 100644
--- a/k8s/errors/deployment-create-container-error.yaml
+++ b/k8s/errors/deployment-create-container-error.yaml
@@ -5,6 +5,9 @@ metadata:
   labels:
     run: deployment-create-container-error
     type: test-pod
+  "annotations": {
+    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+  }
 spec:
   replicas: 2
   selector:
diff --git a/k8s/errors/pod-outofmemory.yaml b/k8s/errors/pod-outofmemory.yaml
index 3f9a991..735e2f1 100644
--- a/k8s/errors/pod-outofmemory.yaml
+++ b/k8s/errors/pod-outofmemory.yaml
@@ -6,6 +6,9 @@ metadata:
     run: pod-outofmemory
     type: test-pod
   name: pod-outofmemory
+  "annotations": {
+    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+  }
 spec:
   containers:
   - image: python:3.10-alpine
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
new file mode 100644
index 0000000..0281729
--- /dev/null
+++ b/sentry_dsn_data.go
@@ -0,0 +1,120 @@
+package main
+
+import (
+	"context"
+	"errors"
+	"sync"
+
+	"github.com/getsentry/sentry-go"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+var DSN = "dsn"
+
+// map from Sentry DSN to Client
+type DsnData struct {
+	mutex     sync.RWMutex
+	clientMap map[string]*sentry.Client
+}
+
+func NewDsnData() *DsnData {
+	return &DsnData{
+		mutex:     sync.RWMutex{},
+		clientMap: make(map[string]*sentry.Client),
+	}
+}
+
+// return client if added successfully
+func (d *DsnData) AddClient(dsn string) (*sentry.Client, error) {
+	d.mutex.Lock()
+	defer d.mutex.Unlock()
+
+	// check if we already encountered this dsn
+	existingClient, ok := d.clientMap[dsn]
+	if ok {
+		return existingClient, errors.New("a client with the given dsn already exists")
+	}
+
+	// create a new client for the dsn
+	newClient, err := sentry.NewClient(
+		sentry.ClientOptions{
+			Dsn:              dsn,
+			Debug:            true,
+			AttachStacktrace: true,
+		},
+	)
+	if err != nil {
+		return nil, err
+	}
+	d.clientMap[dsn] = newClient
+
+	return newClient, nil
+}
+
+// retrieve a client with given dsn
+func (d *DsnData) GetClient(dsn string) (*sentry.Client, error) {
+	d.mutex.RLock()
+	defer d.mutex.RUnlock()
+
+	// check if we have this dsn
+	existingClient, ok := d.clientMap[dsn]
+	if ok {
+		return existingClient, nil
+	} else {
+		return nil, errors.New("a client with given DSN does not exist")
+	}
+}
+
+// recursive function to find if there is a DSN annotation
+func searchDsn(ctx context.Context, object metav1.ObjectMeta) (string, error) {
+
+	clientset, err := getClientsetFromContext(ctx)
+	if err != nil {
+		return "", err
+	}
+
+	dsn, ok := object.Annotations[DSN]
+	if ok {
+		return dsn, nil
+	}
+
+	if len(object.OwnerReferences) == 0 {
+		return "", nil
+	}
+
+	owningRef := object.OwnerReferences[0]
+	switch kind := owningRef.Kind; kind {
+	case "Pod":
+		parentPod, err := clientset.CoreV1().Pods(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return searchDsn(ctx, parentPod.ObjectMeta)
+	case "ReplicaSet":
+		parentReplicaSet, err := clientset.AppsV1().ReplicaSets(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return searchDsn(ctx, parentReplicaSet.ObjectMeta)
+	case "Deployment":
+		parentDeployment, err := clientset.AppsV1().Deployments(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return searchDsn(ctx, parentDeployment.ObjectMeta)
+	case "Job":
+		parentJob, err := clientset.BatchV1().Jobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return searchDsn(ctx, parentJob.ObjectMeta)
+	case "CronJob":
+		parentCronjob, err := clientset.BatchV1().CronJobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
+		if err != nil {
+			return "", err
+		}
+		return searchDsn(ctx, parentCronjob.ObjectMeta)
+	default:
+		return "", errors.New("unsupported object kind encountered")
+	}
+}
diff --git a/watcher_events.go b/watcher_events.go
index 1e53dae..2dd7892 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -140,12 +140,44 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 		return
 	}
 	hub.WithScope(func(scope *sentry.Scope) {
+
+		// search for alternative DSN in the annotations
+		altDsn, err := searchDsn(ctx, eventObject.ObjectMeta)
+		if err != nil {
+			return
+		}
+
+		// if we did find an alternative DSN
+		if altDsn != "" {
+			// attempt to retrieve the corresponding client
+			client, err := dsnData.GetClient(altDsn)
+			if err != nil {
+				return
+			}
+
+			if client == nil {
+				client, err = dsnData.AddClient(altDsn)
+				if err != nil {
+					return
+				}
+			}
+
+			// bind the alternative client to the top layer
+			hub.BindClient(client)
+			fmt.Println("binding client with DSN: " + altDsn)
+		} else {
+			fmt.Println("Using default dsn: " + hub.Client().Options().Dsn)
+		}
+
 		setWatcherTag(scope, eventsWatcherName)
 		sentryEvent := handleGeneralEvent(ctx, eventObject, scope)
 		if sentryEvent != nil {
+			fmt.Println("the event temp dsn is " + hub.Client().Options().Dsn)
 			hub.CaptureEvent(sentryEvent)
 		}
 	})
+	fmt.Println("the event dsn returns to " + hub.Client().Options().Dsn)
+
 }
 
 func watchEventsInNamespace(ctx context.Context, namespace string, watchSince time.Time) (err error) {
diff --git a/watcher_pods.go b/watcher_pods.go
index b64dad4..23c8236 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -20,6 +20,7 @@ import (
 const podsWatcherName = "pods"
 
 var cronsMetaData = NewCronsMetaData()
+var dsnData = NewDsnData()
 
 func handlePodTerminationEvent(ctx context.Context, containerStatus *v1.ContainerStatus, pod *v1.Pod, scope *sentry.Scope) *sentry.Event {
 	logger := zerolog.Ctx(ctx)
@@ -114,12 +115,40 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 	}
 
 	hub.WithScope(func(scope *sentry.Scope) {
+
+		// search for alternative DSN in the annotations
+		altDsn, err := searchDsn(ctx, podObject.ObjectMeta)
+		if err != nil {
+			return
+		}
+
+		// if we did find an alternative DSN
+		if altDsn != "" {
+			// attempt to retrieve the corresponding client
+			client, err := dsnData.GetClient(altDsn)
+			if err != nil {
+				return
+			}
+
+			if client == nil {
+				client, err = dsnData.AddClient(altDsn)
+				if err != nil {
+					return
+				}
+			}
+
+			// bind the alternative client to the top layer
+			hub.BindClient(client)
+		}
+
 		setWatcherTag(scope, podsWatcherName)
 		sentryEvent := handlePodTerminationEvent(ctx, &status, podObject, scope)
 		if sentryEvent != nil {
+			fmt.Println("the pod temp dsn is " + hub.Client().Options().Dsn)
 			hub.CaptureEvent(sentryEvent)
 		}
 	})
+	fmt.Println("the pod dsn returns to " + hub.Client().Options().Dsn)
 	}
 }
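The core mechanism of [PATCH 1/9] is searchDsn, which climbs OwnerReferences (Pod -> ReplicaSet -> Deployment, Job -> CronJob) until some ancestor carries the "dsn" annotation. Below is a minimal, self-contained sketch of that recursion, using client-go's fake clientset in place of this repo's getClientsetFromContext helper; lookupDsn and the placeholder DSN value are illustrative, not part of the patch.

package main

import (
	"context"
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// lookupDsn walks owner references upward (only the ReplicaSet case is shown)
// until an object carries the "dsn" annotation, mirroring searchDsn above.
func lookupDsn(ctx context.Context, cs kubernetes.Interface, obj metav1.ObjectMeta) (string, error) {
	if dsn, ok := obj.Annotations["dsn"]; ok {
		return dsn, nil
	}
	if len(obj.OwnerReferences) == 0 {
		return "", nil // no annotation anywhere up the chain
	}
	owner := obj.OwnerReferences[0]
	switch owner.Kind {
	case "ReplicaSet":
		rs, err := cs.AppsV1().ReplicaSets(obj.Namespace).Get(ctx, owner.Name, metav1.GetOptions{})
		if err != nil {
			return "", err
		}
		return lookupDsn(ctx, cs, rs.ObjectMeta)
	default:
		return "", nil
	}
}

func main() {
	// The annotation lives on the owner, not on the pod itself.
	rs := &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{
		Name: "my-rs", Namespace: "default",
		Annotations: map[string]string{"dsn": "https://examplePublicKey@o0.ingest.sentry.io/0"},
	}}
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "my-pod", Namespace: "default",
		OwnerReferences: []metav1.OwnerReference{{Kind: "ReplicaSet", Name: "my-rs"}},
	}}
	cs := fake.NewSimpleClientset(rs, pod)

	dsn, _ := lookupDsn(context.Background(), cs, pod.ObjectMeta)
	fmt.Println("resolved DSN:", dsn) // prints the ReplicaSet's annotation
}

As in the patch, only the first owner reference is followed; objects with multiple owners would need a loop.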
From 4f2adb872374a6b9e9a2d41b05cca3bb6a51b117 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Sat, 9 Dec 2023 00:45:01 -0800
Subject: [PATCH 2/9] fix errors of sending to wrong projects

---
 crons.go           |  7 +++++--
 sentry_dsn_data.go | 43 +++++++++++++++++++++++++++++++++++++++++++
 watcher_events.go  | 27 ++++++++++++++++-----------
 watcher_pods.go    |  7 ++++---
 4 files changed, 68 insertions(+), 16 deletions(-)

diff --git a/crons.go b/crons.go
index 8746960..beea456 100644
--- a/crons.go
+++ b/crons.go
@@ -83,6 +83,9 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 		return errors.New("cannot get hub from context")
 	}
 
+	// to avoid concurrency issue
+	hub = hub.Clone()
+
 	// Try to find the cronJob name that owns the job
 	// in order to get the crons monitor data
 	if len(job.OwnerReferences) == 0 {
@@ -103,8 +106,6 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 			return
 		}
 
-		fmt.Println("found alt dsn: " + altDsn)
-
 		// if we did find an alternative DSN
 		if altDsn != "" {
 			// attempt to retrieve the corresponding client
@@ -121,6 +122,8 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 			hub.BindClient(client)
 		}
 
+		// pass clone hub down with context
+		ctx = sentry.SetHubOnContext(ctx, hub)
 		// The job just begun so check in to start
 		if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
 			// Add the job to the cronJob informer data
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index 0281729..62e7a78 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -118,3 +118,46 @@ func searchDsn(ctx context.Context, object metav1.ObjectMeta) (string, error) {
 		return "", errors.New("unsupported object kind encountered")
 	}
 }
+
+func findObjectMeta(ctx context.Context, kind string, namespace string, name string) (*metav1.ObjectMeta, error) {
+
+	clientset, err := getClientsetFromContext(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	switch kind {
+	case "Pod":
+		parentPod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return &parentPod.ObjectMeta, nil
+	case "ReplicaSet":
+		parentReplicaSet, err := clientset.AppsV1().ReplicaSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return &parentReplicaSet.ObjectMeta, nil
+	case "Deployment":
+		parentDeployment, err := clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return &parentDeployment.ObjectMeta, nil
+	case "Job":
+		parentJob, err := clientset.BatchV1().Jobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return &parentJob.ObjectMeta, nil
+	case "CronJob":
+		parentCronjob, err := clientset.BatchV1().CronJobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		if err != nil {
+			return nil, err
+		}
+		return &parentCronjob.ObjectMeta, nil
+	default:
+		return nil, errors.New("unsupported object kind encountered")
+	}
+}
diff --git a/watcher_events.go b/watcher_events.go
index 2dd7892..bcee439 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -139,17 +139,26 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 		logger.Error().Msgf("Cannot get Sentry hub from context")
 		return
 	}
+
+	// To avoid concurrency issue
+	hub = hub.Clone()
 	hub.WithScope(func(scope *sentry.Scope) {
 
-		// search for alternative DSN in the annotations
-		altDsn, err := searchDsn(ctx, eventObject.ObjectMeta)
+		// Find the object meta that the event is about
+		objectMeta, err := findObjectMeta(ctx, eventObject.InvolvedObject.Kind, eventObject.InvolvedObject.Namespace, eventObject.InvolvedObject.Name)
+		if err != nil {
+			return
+		}
+
+		// Search for alternative DSN in the annotations
+		altDsn, err := searchDsn(ctx, *objectMeta)
 		if err != nil {
 			return
 		}
 
-		// if we did find an alternative DSN
+		// If we did find an alternative DSN
 		if altDsn != "" {
-			// attempt to retrieve the corresponding client
+			// Attempt to retrieve the corresponding client
 			client, err := dsnData.GetClient(altDsn)
 			if err != nil {
 				return
@@ -162,22 +171,18 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 			}
 		}
 
-			// bind the alternative client to the top layer
+			// Bind the alternative client to the top layer
 			hub.BindClient(client)
-			fmt.Println("binding client with DSN: " + altDsn)
-		} else {
-			fmt.Println("Using default dsn: " + hub.Client().Options().Dsn)
 		}
 
+		// Pass down clone context
+		ctx = sentry.SetHubOnContext(ctx, hub)
 		setWatcherTag(scope, eventsWatcherName)
 		sentryEvent := handleGeneralEvent(ctx, eventObject, scope)
 		if sentryEvent != nil {
-			fmt.Println("the event temp dsn is " + hub.Client().Options().Dsn)
 			hub.CaptureEvent(sentryEvent)
 		}
 	})
-	fmt.Println("the event dsn returns to " + hub.Client().Options().Dsn)
-
 }
 
 func watchEventsInNamespace(ctx context.Context, namespace string, watchSince time.Time) (err error) {
diff --git a/watcher_pods.go b/watcher_pods.go
index 23c8236..0eacd26 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -104,6 +104,8 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 		logger.Error().Msgf("Cannot get Sentry hub from context")
 		return
 	}
+	// to avoid concurrency issue
+	hub = hub.Clone()
 
 	containerStatuses := podObject.Status.ContainerStatuses
 	logger.Trace().Msgf("Container statuses: %#v\n", containerStatuses)
@@ -113,7 +115,6 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 			// Ignore non-Terminated statuses
 			continue
 		}
-
 		hub.WithScope(func(scope *sentry.Scope) {
 
 			// search for alternative DSN in the annotations
@@ -141,14 +142,14 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 			hub.BindClient(client)
 		}
 
+		// pass down clone context
+		ctx = sentry.SetHubOnContext(ctx, hub)
 		setWatcherTag(scope, podsWatcherName)
 		sentryEvent := handlePodTerminationEvent(ctx, &status, podObject, scope)
 		if sentryEvent != nil {
-			fmt.Println("the pod temp dsn is " + hub.Client().Options().Dsn)
 			hub.CaptureEvent(sentryEvent)
 		}
 	})
-	fmt.Println("the pod dsn returns to " + hub.Client().Options().Dsn)
 	}
 }
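[PATCH 2/9] addresses cross-project leakage: watch handlers run concurrently, and binding an annotation-specific client to the shared hub would reroute unrelated events to the wrong project. The fix is hub.Clone() plus sentry.SetHubOnContext, so the rebinding stays local to one handler invocation. A runnable sketch of that pattern with plain sentry-go follows; the empty DSN strings keep both clients offline and are placeholders, not values from the patch.

package main

import (
	"context"
	"fmt"

	"github.com/getsentry/sentry-go"
)

func main() {
	// Empty DSN: the SDK initializes but discards events.
	_ = sentry.Init(sentry.ClientOptions{Dsn: ""})

	// Clone first, so binding a per-resource client cannot race with
	// other goroutines using the global hub.
	hub := sentry.CurrentHub().Clone()

	altClient, err := sentry.NewClient(sentry.ClientOptions{Dsn: ""})
	if err != nil {
		panic(err)
	}
	hub.BindClient(altClient) // only this clone sees the alternative client

	// Downstream code receives the clone through the context.
	ctx := sentry.SetHubOnContext(context.Background(), hub)
	if h := sentry.GetHubFromContext(ctx); h != nil {
		h.CaptureMessage("routed through the cloned hub")
	}

	fmt.Println("global hub unchanged:", sentry.CurrentHub().Client() != altClient)
}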
From 5d040e71f6fd76619d570542271d0405e29a7351 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Sat, 9 Dec 2023 01:27:31 -0800
Subject: [PATCH 3/9] clients inherit old options with only dsn changed

---
 crons.go           | 4 +++-
 sentry_dsn_data.go | 8 ++++----
 watcher_events.go  | 4 +++-
 watcher_pods.go    | 4 +++-
 4 files changed, 13 insertions(+), 7 deletions(-)

diff --git a/crons.go b/crons.go
index beea456..038a0a8 100644
--- a/crons.go
+++ b/crons.go
@@ -112,7 +112,9 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 			client, _ := dsnData.GetClient(altDsn)
 
 			if client == nil {
-				client, err = dsnData.AddClient(altDsn)
+				newOptions := hub.Client().Options()
+				newOptions.Dsn = altDsn
+				client, err = dsnData.AddClient(newOptions)
 				if err != nil {
 					return
 				}
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index 62e7a78..83b206c 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -25,12 +25,12 @@ func NewDsnData() *DsnData {
 }
 
 // return client if added successfully
-func (d *DsnData) AddClient(dsn string) (*sentry.Client, error) {
+func (d *DsnData) AddClient(options sentry.ClientOptions) (*sentry.Client, error) {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
 	// check if we already encountered this dsn
-	existingClient, ok := d.clientMap[dsn]
+	existingClient, ok := d.clientMap[options.Dsn]
 	if ok {
 		return existingClient, errors.New("a client with the given dsn already exists")
 	}
@@ -38,7 +38,7 @@ func (d *DsnData) AddClient(options sentry.ClientOptions) (*sentry.Client, error
 	// create a new client for the dsn
 	newClient, err := sentry.NewClient(
 		sentry.ClientOptions{
-			Dsn:              dsn,
+			Dsn:              options.Dsn,
 			Debug:            true,
 			AttachStacktrace: true,
 		},
 	)
 	if err != nil {
 		return nil, err
 	}
-	d.clientMap[dsn] = newClient
+	d.clientMap[options.Dsn] = newClient
 
 	return newClient, nil
 }
diff --git a/watcher_events.go b/watcher_events.go
index bcee439..d4d5882 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -165,7 +165,9 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 			}
 
 			if client == nil {
-				client, err = dsnData.AddClient(altDsn)
+				newOptions := hub.Client().Options()
+				newOptions.Dsn = altDsn
+				client, err = dsnData.AddClient(newOptions)
 				if err != nil {
 					return
 				}
diff --git a/watcher_pods.go b/watcher_pods.go
index 0eacd26..d516300 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -132,7 +132,9 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 			}
 
 			if client == nil {
-				client, err = dsnData.AddClient(altDsn)
+				newOptions := hub.Client().Options()
+				newOptions.Dsn = altDsn
+				client, err = dsnData.AddClient(newOptions)
 				if err != nil {
 					return
 				}

From 062583ee3ba239ae536ccb604a08d4c9f77474b8 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Sat, 9 Dec 2023 01:41:04 -0800
Subject: [PATCH 4/9] change transport mock to remove last event

---
 mocks_test.go          | 6 ++----
 watcher_events_test.go | 1 +
 2 files changed, 3 insertions(+), 4 deletions(-)

diff --git a/mocks_test.go b/mocks_test.go
index 655d05e..6177dac 100644
--- a/mocks_test.go
+++ b/mocks_test.go
@@ -8,9 +8,8 @@ import (
 )
 
 type TransportMock struct {
-	mu        sync.Mutex
-	events    []*sentry.Event
-	lastEvent *sentry.Event
+	mu     sync.Mutex
+	events []*sentry.Event
 }
 
 func (t *TransportMock) Configure(options sentry.ClientOptions) {}
@@ -18,7 +17,6 @@ func (t *TransportMock) SendEvent(event *sentry.Event) {
 	t.mu.Lock()
 	defer t.mu.Unlock()
 	t.events = append(t.events, event)
-	t.lastEvent = event
 }
 func (t *TransportMock) Flush(timeout time.Duration) bool {
 	return true
diff --git a/watcher_events_test.go b/watcher_events_test.go
index 4448bea..32d1a8b 100644
--- a/watcher_events_test.go
+++ b/watcher_events_test.go
@@ -101,6 +101,7 @@ func TestHandleWatchEvent(t *testing.T) {
 
 	// The Sentry event message should match that of the event message
 	expectedMsg := "Fake Message: TestHandleWatchEvent"
+
 	if events[0].Message != expectedMsg {
 		t.Errorf("received %s, wanted %s", events[0].Message, expectedMsg)
 	}
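[PATCH 3/9] stops hard-coding options when deriving a per-project client: the call sites copy the currently bound client's options and swap only the DSN, so settings such as Debug and AttachStacktrace carry over. This works because sentry.ClientOptions is a plain struct, so assignment produces an independent copy. ([PATCH 4/9] is a test-only cleanup: with just the events slice, the last event is events[len(events)-1].) A sketch of the options-copy technique; the empty DSN strings are placeholders, not project DSNs from the patch.

package main

import (
	"fmt"

	"github.com/getsentry/sentry-go"
)

func main() {
	// Stands in for the operator's default client; the empty DSN keeps it offline.
	base, err := sentry.NewClient(sentry.ClientOptions{
		Dsn:              "",
		Debug:            true,
		AttachStacktrace: true,
	})
	if err != nil {
		panic(err)
	}

	// Options() returns a ClientOptions value, so this is a shallow copy:
	// mutating Dsn here leaves the base client's options untouched.
	opts := base.Options()
	opts.Dsn = "" // an annotation-supplied DSN would go here

	derived, err := sentry.NewClient(opts)
	if err != nil {
		panic(err)
	}
	fmt.Println("derived inherits Debug:", derived.Options().Debug)
	fmt.Println("base DSN unchanged:", base.Options().Dsn == "")
}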
From c17f4ef8f9c1b70c9cb957eb44769eb8771e1aef Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Mon, 11 Dec 2023 13:43:36 -0500
Subject: [PATCH 5/9] refactor dsn code

---
 crons.go                                      |  28 +----
 k8s/errors/cronjob-basic-error.yaml           |   2 +-
 k8s/errors/cronjob-basic-success.yaml         |   2 +-
 k8s/errors/cronjob-late-maybe-error.yaml      |   2 +-
 k8s/errors/cronjob-late-success.yaml          |   2 +-
 .../deployment-create-container-error.yaml    |   2 +-
 k8s/errors/pod-outofmemory.yaml               |   2 +-
 sentry_dsn_data.go                            | 109 +++++++++---------
 watcher_events.go                             |  27 +----
 watcher_pods.go                               |  31 +----
 10 files changed, 72 insertions(+), 135 deletions(-)

diff --git a/crons.go b/crons.go
index 038a0a8..ddbeb77 100644
--- a/crons.go
+++ b/crons.go
@@ -83,7 +83,7 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 		return errors.New("cannot get hub from context")
 	}
 
-	// to avoid concurrency issue
+	// To avoid concurrency issue
 	hub = hub.Clone()
 
 	// Try to find the cronJob name that owns the job
@@ -101,30 +101,14 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 	}
 
 	hub.WithScope(func(scope *sentry.Scope) {
-		altDsn, err := searchDsn(ctx, job.ObjectMeta)
-		if err != nil {
-			return
-		}
-
-		// if we did find an alternative DSN
-		if altDsn != "" {
-			// attempt to retrieve the corresponding client
-			client, _ := dsnData.GetClient(altDsn)
-
-			if client == nil {
-				newOptions := hub.Client().Options()
-				newOptions.Dsn = altDsn
-				client, err = dsnData.AddClient(newOptions)
-				if err != nil {
-					return
-				}
-			}
 
-			// bind the alternative client to the top layer
+		// If DSN annotation provided, we bind a new client with that DSN
+		client, ok := dsnData.GetClientFromObject(ctx, &job.ObjectMeta, hub.Clone().Client().Options())
+		if ok {
 			hub.BindClient(client)
 		}
 
-		// pass clone hub down with context
+		// Pass clone hub down with context
 		ctx = sentry.SetHubOnContext(ctx, hub)
 		// The job just begun so check in to start
 		if job.Status.Active == 0 && job.Status.Succeeded == 0 && job.Status.Failed == 0 {
 			// Add the job to the cronJob informer data
 			checkinJobStarting(ctx, job, cronsMonitorData)
 		} else if job.Status.Active > 0 {
 			return
 		} else if job.Status.Failed > 0 || job.Status.Succeeded > 0 {
 			checkinJobEnding(ctx, job, cronsMonitorData)
-			return // finished
+			return // Finished
 		}
 	})
diff --git a/k8s/errors/cronjob-basic-error.yaml b/k8s/errors/cronjob-basic-error.yaml
index 29b8b8e..a219f80 100644
--- a/k8s/errors/cronjob-basic-error.yaml
+++ b/k8s/errors/cronjob-basic-error.yaml
@@ -5,7 +5,7 @@ metadata:
   labels:
     type: test-pod
   "annotations": {
-    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
   }
 spec:
   schedule: "* * * * *"
diff --git a/k8s/errors/cronjob-basic-success.yaml b/k8s/errors/cronjob-basic-success.yaml
index e2fd243..b1e8540 100644
--- a/k8s/errors/cronjob-basic-success.yaml
+++ b/k8s/errors/cronjob-basic-success.yaml
@@ -5,7 +5,7 @@ metadata:
   labels:
     type: test-pod
   "annotations": {
-    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
   }
 spec:
   schedule: "* * * * *"
diff --git a/k8s/errors/cronjob-late-maybe-error.yaml b/k8s/errors/cronjob-late-maybe-error.yaml
index 7986892..b01adb1 100644
--- a/k8s/errors/cronjob-late-maybe-error.yaml
+++ b/k8s/errors/cronjob-late-maybe-error.yaml
@@ -5,7 +5,7 @@ metadata:
   labels:
     type: test-pod
   "annotations": {
-    "dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
+    "k8s.sentry.io/dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
   }
 spec:
   schedule: "* * * * *"
diff --git a/k8s/errors/cronjob-late-success.yaml b/k8s/errors/cronjob-late-success.yaml
index 15b2f72..2bfdc8b 100644
--- a/k8s/errors/cronjob-late-success.yaml
+++ b/k8s/errors/cronjob-late-success.yaml
@@ -5,7 +5,7 @@ metadata:
   labels:
     type: test-pod
   "annotations": {
-    "dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
+    "k8s.sentry.io/dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
   }
 spec:
   schedule: "* * * * *"
diff --git a/k8s/errors/deployment-create-container-error.yaml b/k8s/errors/deployment-create-container-error.yaml
index 8e41b53..98de5a8 100644
--- a/k8s/errors/deployment-create-container-error.yaml
+++ b/k8s/errors/deployment-create-container-error.yaml
@@ -6,7 +6,7 @@ metadata:
     run: deployment-create-container-error
     type: test-pod
   "annotations": {
-    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
   }
 spec:
   replicas: 2
diff --git a/k8s/errors/pod-outofmemory.yaml b/k8s/errors/pod-outofmemory.yaml
index 735e2f1..6c8be41 100644
--- a/k8s/errors/pod-outofmemory.yaml
+++ b/k8s/errors/pod-outofmemory.yaml
@@ -7,7 +7,7 @@ metadata:
     type: test-pod
   name: pod-outofmemory
   "annotations": {
-    "dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
+    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
   }
 spec:
   containers:
   - image: python:3.10-alpine
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index 83b206c..dc6c0ba 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -9,30 +9,31 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-var DSN = "dsn"
+var DSNAnnotation = "k8s.sentry.io/dsn"
 
 // map from Sentry DSN to Client
-type DsnData struct {
+type DsnClientMapping struct {
 	mutex     sync.RWMutex
 	clientMap map[string]*sentry.Client
 }
 
-func NewDsnData() *DsnData {
-	return &DsnData{
+func NewDsnData() *DsnClientMapping {
+	return &DsnClientMapping{
 		mutex:     sync.RWMutex{},
 		clientMap: make(map[string]*sentry.Client),
 	}
 }
 
 // return client if added successfully
-func (d *DsnData) AddClient(options sentry.ClientOptions) (*sentry.Client, error) {
+// (also returns client if already exists)
+func (d *DsnClientMapping) AddClientToMap(options sentry.ClientOptions) (*sentry.Client, error) {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
 	// check if we already encountered this dsn
 	existingClient, ok := d.clientMap[options.Dsn]
 	if ok {
-		return existingClient, errors.New("a client with the given dsn already exists")
+		return existingClient, nil
 	}
 
 	// create a new client for the dsn
@@ -52,28 +53,49 @@ func (d *DsnClientMapping) AddClientToMap(options sentry.ClientOptions) (*sentry
 // retrieve a client with given dsn
-func (d *DsnData) GetClient(dsn string) (*sentry.Client, error) {
+func (d *DsnClientMapping) GetClientFromMap(dsn string) (*sentry.Client, bool) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 
 	// check if we have this dsn
 	existingClient, ok := d.clientMap[dsn]
 	if ok {
-		return existingClient, nil
+		return existingClient, true
 	} else {
-		return nil, errors.New("a client with given DSN does not exist")
+		return nil, false
 	}
 }
 
+func (d *DsnClientMapping) GetClientFromObject(ctx context.Context, objectMeta *metav1.ObjectMeta, clientOptions sentry.ClientOptions) (*sentry.Client, bool) {
+
+	// find DSN annotation from the object
+	altDsn, err := searchDsn(ctx, objectMeta)
+	if err != nil {
+		return nil, false
+	}
+
+	// if we did find an alternative DSN
+	if altDsn != "" {
+		// attempt to retrieve the corresponding client
+		client, _ := dsnData.GetClientFromMap(altDsn)
+		if client == nil {
+			// create new client
+			clientOptions.Dsn = altDsn
+			client, err = dsnData.AddClientToMap(clientOptions)
+			if err != nil {
+				return nil, false
+			}
+		}
+		return client, true
+	} else {
+		return nil, false
+	}
+}
+
 // recursive function to find if there is a DSN annotation
-func searchDsn(ctx context.Context, object metav1.ObjectMeta) (string, error) {
-
-	clientset, err := getClientsetFromContext(ctx)
-	if err != nil {
-		return "", err
-	}
+func searchDsn(ctx context.Context, object *metav1.ObjectMeta) (string, error) {
 
-	dsn, ok := object.Annotations[DSN]
+	dsn, ok := object.Annotations[DSNAnnotation]
 	if ok {
 		return dsn, nil
 	}
@@ -83,40 +105,13 @@ func searchDsn(ctx context.Context, object *metav1.ObjectMeta) (string, error) {
 	}
 
 	owningRef := object.OwnerReferences[0]
-	switch kind := owningRef.Kind; kind {
-	case "Pod":
-		parentPod, err := clientset.CoreV1().Pods(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		return searchDsn(ctx, parentPod.ObjectMeta)
-	case "ReplicaSet":
-		parentReplicaSet, err := clientset.AppsV1().ReplicaSets(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		return searchDsn(ctx, parentReplicaSet.ObjectMeta)
-	case "Deployment":
-		parentDeployment, err := clientset.AppsV1().Deployments(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		return searchDsn(ctx, parentDeployment.ObjectMeta)
-	case "Job":
-		parentJob, err := clientset.BatchV1().Jobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		return searchDsn(ctx, parentJob.ObjectMeta)
-	case "CronJob":
-		parentCronjob, err := clientset.BatchV1().CronJobs(object.Namespace).Get(context.Background(), owningRef.Name, metav1.GetOptions{})
-		if err != nil {
-			return "", err
-		}
-		return searchDsn(ctx, parentCronjob.ObjectMeta)
-	default:
-		return "", errors.New("unsupported object kind encountered")
+	owningObjectMeta, err := findObjectMeta(ctx, owningRef.Kind, object.Namespace, owningRef.Name)
+
+	if err != nil {
+		return "", err
 	}
+
+	return searchDsn(ctx, owningObjectMeta)
 }
 
 func findObjectMeta(ctx context.Context, kind string, namespace string, name string) (*metav1.ObjectMeta, error) {
@@ -128,35 +123,35 @@ func findObjectMeta(ctx context.Context, kind string, namespace string, name str
 	switch kind {
 	case "Pod":
-		parentPod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		pod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
-		return &parentPod.ObjectMeta, nil
+		return &pod.ObjectMeta, nil
 	case "ReplicaSet":
-		parentReplicaSet, err := clientset.AppsV1().ReplicaSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		replicaSet, err := clientset.AppsV1().ReplicaSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
-		return &parentReplicaSet.ObjectMeta, nil
+		return &replicaSet.ObjectMeta, nil
 	case "Deployment":
-		parentDeployment, err := clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		deployment, err := clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
-		return &parentDeployment.ObjectMeta, nil
+		return &deployment.ObjectMeta, nil
 	case "Job":
-		parentJob, err := clientset.BatchV1().Jobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		job, err := clientset.BatchV1().Jobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
-		return &parentJob.ObjectMeta, nil
+		return &job.ObjectMeta, nil
 	case "CronJob":
-		parentCronjob, err := clientset.BatchV1().CronJobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
+		cronjob, err := clientset.BatchV1().CronJobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
 			return nil, err
 		}
-		return &parentCronjob.ObjectMeta, nil
+		return &cronjob.ObjectMeta, nil
 	default:
 		return nil, errors.New("unsupported object kind encountered")
 	}
diff --git a/watcher_events.go b/watcher_events.go
index d4d5882..ba4ef62 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -150,30 +150,9 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 			return
 		}
 
-		// Search for alternative DSN in the annotations
-		altDsn, err := searchDsn(ctx, *objectMeta)
-		if err != nil {
-			return
-		}
-
-		// If we did find an alternative DSN
-		if altDsn != "" {
-			// Attempt to retrieve the corresponding client
-			client, err := dsnData.GetClient(altDsn)
-			if err != nil {
-				return
-			}
-
-			if client == nil {
-				newOptions := hub.Client().Options()
-				newOptions.Dsn = altDsn
-				client, err = dsnData.AddClient(newOptions)
-				if err != nil {
-					return
-				}
-			}
-
-			// Bind the alternative client to the top layer
+		// if DSN annotation provided, we bind a new client with that DSN
+		client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Clone().Client().Options())
+		if ok {
 			hub.BindClient(client)
 		}
diff --git a/watcher_pods.go b/watcher_pods.go
index d516300..1951186 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -104,7 +104,7 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 		logger.Error().Msgf("Cannot get Sentry hub from context")
 		return
 	}
-	// to avoid concurrency issue
+	// To avoid concurrency issue
 	hub = hub.Clone()
 
 	containerStatuses := podObject.Status.ContainerStatuses
@@ -117,34 +117,13 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 		hub.WithScope(func(scope *sentry.Scope) {
 
-			// search for alternative DSN in the annotations
-			altDsn, err := searchDsn(ctx, podObject.ObjectMeta)
-			if err != nil {
-				return
-			}
-
-			// if we did find an alternative DSN
-			if altDsn != "" {
-				// attempt to retrieve the corresponding client
-				client, err := dsnData.GetClient(altDsn)
-				if err != nil {
-					return
-				}
-
-				if client == nil {
-					newOptions := hub.Client().Options()
-					newOptions.Dsn = altDsn
-					client, err = dsnData.AddClient(newOptions)
-					if err != nil {
-						return
-					}
-				}
-
-				// bind the alternative client to the top layer
+			// If DSN annotation provided, we bind a new client with that DSN
+			client, ok := dsnData.GetClientFromObject(ctx, &podObject.ObjectMeta, hub.Clone().Client().Options())
+			if ok {
 				hub.BindClient(client)
 			}
 
-			// pass down clone context
+			// Pass down clone context
 			ctx = sentry.SetHubOnContext(ctx, hub)
 			setWatcherTag(scope, podsWatcherName)
 			sentryEvent := handlePodTerminationEvent(ctx, &status, podObject, scope)
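[PATCH 5/9] consolidates the three copies of the lookup-and-bind logic into DsnClientMapping.GetClientFromObject with a comma-ok result, and moves the annotation key to the namespaced k8s.sentry.io/dsn. The signature choice fits the call sites: a missing annotation is not an error, it just means "keep the default client". A stand-in sketch of that contract follows; clientFor is a hypothetical name, and the real method additionally consults the shared map and the Kubernetes API.

package main

import (
	"fmt"

	"github.com/getsentry/sentry-go"
)

// clientFor mimics GetClientFromObject's comma-ok contract: success is a
// bool, so call sites collapse to a two-line "look up, maybe bind" step.
func clientFor(dsn string, base sentry.ClientOptions) (*sentry.Client, bool) {
	if dsn == "" {
		return nil, false // no annotation found: caller keeps the default client
	}
	base.Dsn = dsn // inherit every other option unchanged
	client, err := sentry.NewClient(base)
	if err != nil {
		return nil, false // unparsable DSN: fall back to the default client
	}
	return client, true
}

func main() {
	hub := sentry.CurrentHub().Clone()
	if client, ok := clientFor("", sentry.ClientOptions{}); ok {
		hub.BindClient(client)
	}
	fmt.Println("hub keeps its default client whenever ok == false")
}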
From 6480d57490822945aed7021f892053cd984e523e Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Mon, 11 Dec 2023 14:31:33 -0500
Subject: [PATCH 6/9] fix event watcher to pass unit test

---
 sentry_dsn_data.go | 32 ++++++++++++++++----------------
 watcher_events.go  | 14 ++++++--------
 2 files changed, 22 insertions(+), 24 deletions(-)

diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index dc6c0ba..2a4ea6d 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -105,54 +105,54 @@ func searchDsn(ctx context.Context, object *metav1.ObjectMeta) (string, error) {
 	}
 
 	owningRef := object.OwnerReferences[0]
-	owningObjectMeta, err := findObjectMeta(ctx, owningRef.Kind, object.Namespace, owningRef.Name)
+	owningObjectMeta, ok := findObjectMeta(ctx, owningRef.Kind, object.Namespace, owningRef.Name)
 
-	if err != nil {
-		return "", err
+	if !ok {
+		return "", errors.New("the DSN cannot be found")
 	}
 
 	return searchDsn(ctx, owningObjectMeta)
 }
 
-func findObjectMeta(ctx context.Context, kind string, namespace string, name string) (*metav1.ObjectMeta, error) {
+func findObjectMeta(ctx context.Context, kind string, namespace string, name string) (*metav1.ObjectMeta, bool) {
 
 	clientset, err := getClientsetFromContext(ctx)
 	if err != nil {
-		return nil, err
+		return nil, false
 	}
 
 	switch kind {
 	case "Pod":
 		pod, err := clientset.CoreV1().Pods(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			return nil, err
+			return nil, false
 		}
-		return &pod.ObjectMeta, nil
+		return &pod.ObjectMeta, true
 	case "ReplicaSet":
 		replicaSet, err := clientset.AppsV1().ReplicaSets(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			return nil, err
+			return nil, false
 		}
-		return &replicaSet.ObjectMeta, nil
+		return &replicaSet.ObjectMeta, true
 	case "Deployment":
 		deployment, err := clientset.AppsV1().Deployments(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			return nil, err
+			return nil, false
 		}
-		return &deployment.ObjectMeta, nil
+		return &deployment.ObjectMeta, true
 	case "Job":
 		job, err := clientset.BatchV1().Jobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			return nil, err
+			return nil, false
 		}
-		return &job.ObjectMeta, nil
+		return &job.ObjectMeta, true
 	case "CronJob":
 		cronjob, err := clientset.BatchV1().CronJobs(namespace).Get(context.Background(), name, metav1.GetOptions{})
 		if err != nil {
-			return nil, err
+			return nil, false
 		}
-		return &cronjob.ObjectMeta, nil
+		return &cronjob.ObjectMeta, true
 	default:
-		return nil, errors.New("unsupported object kind encountered")
+		return nil, false
 	}
 }
diff --git a/watcher_events.go b/watcher_events.go
index ba4ef62..bbcc692 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -145,15 +145,13 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 	hub.WithScope(func(scope *sentry.Scope) {
 
 		// Find the object meta that the event is about
-		objectMeta, err := findObjectMeta(ctx, eventObject.InvolvedObject.Kind, eventObject.InvolvedObject.Namespace, eventObject.InvolvedObject.Name)
-		if err != nil {
-			return
-		}
-
-		// if DSN annotation provided, we bind a new client with that DSN
-		client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Clone().Client().Options())
+		objectMeta, ok := findObjectMeta(ctx, eventObject.InvolvedObject.Kind, eventObject.InvolvedObject.Namespace, eventObject.InvolvedObject.Name)
 		if ok {
-			hub.BindClient(client)
+			// if DSN annotation provided, we bind a new client with that DSN
+			client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Clone().Client().Options())
+			if ok {
+				hub.BindClient(client)
+			}
 		}
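[PATCH 6/9] fixes the event watcher: a corev1.Event's own metadata carries no DSN annotation, so the handler must first resolve InvolvedObject to the Pod or Job the event describes. It also converts findObjectMeta to a comma-ok result, so unsupported kinds (e.g. Node) fall back to the default DSN instead of aborting the scope callback. A sketch of that resolution step against a fake clientset; podMeta covers only the Pod case, and "placeholder" is not a usable DSN.

package main

import (
	"context"
	"fmt"

	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/kubernetes/fake"
)

// podMeta mirrors findObjectMeta's comma-ok contract: any lookup failure
// reports (nil, false) rather than an error.
func podMeta(ctx context.Context, cs kubernetes.Interface, namespace, name string) (*metav1.ObjectMeta, bool) {
	pod, err := cs.CoreV1().Pods(namespace).Get(ctx, name, metav1.GetOptions{})
	if err != nil {
		return nil, false
	}
	return &pod.ObjectMeta, true
}

func main() {
	pod := &corev1.Pod{ObjectMeta: metav1.ObjectMeta{
		Name: "crashing-pod", Namespace: "default",
		Annotations: map[string]string{"k8s.sentry.io/dsn": "placeholder"},
	}}
	cs := fake.NewSimpleClientset(pod)

	// A watch event only carries a reference to the object it is about.
	ev := corev1.Event{InvolvedObject: corev1.ObjectReference{
		Kind: "Pod", Namespace: "default", Name: "crashing-pod",
	}}

	if meta, ok := podMeta(context.Background(), cs, ev.InvolvedObject.Namespace, ev.InvolvedObject.Name); ok {
		fmt.Println("annotation:", meta.Annotations["k8s.sentry.io/dsn"])
	}
}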
From 7cdfe627b78ea8c76f916558a9ca0298c4150c48 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Tue, 12 Dec 2023 12:51:25 -0500
Subject: [PATCH 7/9] perform minor code cleanup

---
 crons.go                                          | 2 +-
 k8s/errors/cronjob-basic-error.yaml               | 5 ++---
 k8s/errors/cronjob-basic-success.yaml             | 5 ++---
 k8s/errors/cronjob-late-maybe-error.yaml          | 5 ++---
 k8s/errors/cronjob-late-success.yaml              | 5 ++---
 k8s/errors/deployment-create-container-error.yaml | 5 ++---
 k8s/errors/pod-outofmemory.yaml                   | 5 ++---
 sentry_dsn_data.go                                | 6 +-----
 8 files changed, 14 insertions(+), 24 deletions(-)

diff --git a/crons.go b/crons.go
index ddbeb77..5b4dff2 100644
--- a/crons.go
+++ b/crons.go
@@ -103,7 +103,7 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 	hub.WithScope(func(scope *sentry.Scope) {
 
 		// If DSN annotation provided, we bind a new client with that DSN
-		client, ok := dsnData.GetClientFromObject(ctx, &job.ObjectMeta, hub.Clone().Client().Options())
+		client, ok := dsnData.GetClientFromObject(ctx, &job.ObjectMeta, hub.Client().Options())
 		if ok {
 			hub.BindClient(client)
 		}
diff --git a/k8s/errors/cronjob-basic-error.yaml b/k8s/errors/cronjob-basic-error.yaml
index a219f80..eb30b25 100644
--- a/k8s/errors/cronjob-basic-error.yaml
+++ b/k8s/errors/cronjob-basic-error.yaml
@@ -4,9 +4,8 @@ metadata:
   name: cronjob-basic-error
   labels:
     type: test-pod
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896"
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-basic-success.yaml b/k8s/errors/cronjob-basic-success.yaml
index b1e8540..ace025e 100644
--- a/k8s/errors/cronjob-basic-success.yaml
+++ b/k8s/errors/cronjob-basic-success.yaml
@@ -4,9 +4,8 @@ metadata:
   name: cronjob-basic-success
   labels:
     type: test-pod
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896"
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-late-maybe-error.yaml b/k8s/errors/cronjob-late-maybe-error.yaml
index b01adb1..f944921 100644
--- a/k8s/errors/cronjob-late-maybe-error.yaml
+++ b/k8s/errors/cronjob-late-maybe-error.yaml
@@ -4,9 +4,8 @@ metadata:
   name: cronjob-late-maybe-error
   labels:
     type: test-pod
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024"
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/cronjob-late-success.yaml b/k8s/errors/cronjob-late-success.yaml
index 2bfdc8b..e1b9144 100644
--- a/k8s/errors/cronjob-late-success.yaml
+++ b/k8s/errors/cronjob-late-success.yaml
@@ -4,9 +4,8 @@ metadata:
   name: cronjob-late-success
   labels:
     type: test-pod
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://c6a5dd95a40ab7e4e34a3af43c14f848@o4506191942320128.ingest.sentry.io/4506363401601024"
 spec:
   schedule: "* * * * *"
   jobTemplate:
diff --git a/k8s/errors/deployment-create-container-error.yaml b/k8s/errors/deployment-create-container-error.yaml
index 98de5a8..0e88576 100644
--- a/k8s/errors/deployment-create-container-error.yaml
+++ b/k8s/errors/deployment-create-container-error.yaml
@@ -5,9 +5,8 @@ metadata:
   labels:
     run: deployment-create-container-error
     type: test-pod
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896"
 spec:
   replicas: 2
   selector:
diff --git a/k8s/errors/pod-outofmemory.yaml b/k8s/errors/pod-outofmemory.yaml
index 6c8be41..997aab8 100644
--- a/k8s/errors/pod-outofmemory.yaml
+++ b/k8s/errors/pod-outofmemory.yaml
@@ -6,9 +6,8 @@ metadata:
     run: pod-outofmemory
     type: test-pod
   name: pod-outofmemory
-  "annotations": {
-    "k8s.sentry.io/dsn" : "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896",
-  }
+  annotations:
+    k8s.sentry.io/dsn: "https://474d9da00094c5e39d6800c01f3aeff6@o4506191942320128.ingest.sentry.io/4506363396816896"
 spec:
   containers:
   - image: python:3.10-alpine
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index 2a4ea6d..61a7443 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -59,11 +59,7 @@ func (d *DsnClientMapping) GetClientFromMap(dsn string) (*sentry.Client, bool) {
 
 	// check if we have this dsn
 	existingClient, ok := d.clientMap[dsn]
-	if ok {
-		return existingClient, true
-	} else {
-		return nil, false
-	}
+	return existingClient, ok
 }
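[PATCH 7/9] rewrites the manifest annotations from JSON-style flow mapping to conventional YAML block syntax; both forms parse identically (YAML is a superset of JSON), but the block form is what kubectl users expect. It also simplifies GetClientFromMap to return the map's own comma-ok values and drops the now-redundant hub.Clone() at the option-copy sites, since each handler already works on a cloned hub. One step that pairs naturally with annotation-supplied DSNs, though the patch does not add it, is validating the value with sentry.NewDsn before ever constructing a client. A sketch using Sentry's documented placeholder DSN, not one of the patch's project DSNs:

package main

import (
	"fmt"

	"github.com/getsentry/sentry-go"
)

func main() {
	// Value as it would arrive from metadata.annotations["k8s.sentry.io/dsn"].
	raw := "https://examplePublicKey@o0.ingest.sentry.io/0"

	// NewDsn parses and validates the DSN without creating a client,
	// a cheap guard before NewClient/BindClient.
	dsn, err := sentry.NewDsn(raw)
	if err != nil {
		fmt.Println("rejecting annotation:", err)
		return
	}
	fmt.Println("project ID:", dsn.GetProjectID())
}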
From f90e584bd1bdc5250cc12656f3e4816ed5f1d511 Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Tue, 12 Dec 2023 13:17:34 -0500
Subject: [PATCH 8/9] add flag for custom dsn

---
 .env.example       |  1 +
 sentry_dsn_data.go | 19 +++++++++++++++----
 watcher_events.go  |  2 +-
 watcher_pods.go    |  2 +-
 4 files changed, 18 insertions(+), 6 deletions(-)

diff --git a/.env.example b/.env.example
index 9b81918..271e532 100644
--- a/.env.example
+++ b/.env.example
@@ -7,3 +7,4 @@ SENTRY_K8S_CLUSTER_CONFIG_TYPE=""
 SENTRY_K8S_KUBECONFIG_PATH=""
 SENTRY_K8S_LOG_LEVEL=""
 SENTRY_K8S_MONITOR_CRONJOBS=""
+SENTRY_K8S_CUSTOM_DSNS=""
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index 61a7443..f7b0e21 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -3,6 +3,7 @@ package main
 import (
 	"context"
 	"errors"
+	"os"
 	"sync"
 
 	"github.com/getsentry/sentry-go"
@@ -13,14 +14,16 @@ var DSNAnnotation = "k8s.sentry.io/dsn"
 
 // map from Sentry DSN to Client
 type DsnClientMapping struct {
-	mutex     sync.RWMutex
-	clientMap map[string]*sentry.Client
+	mutex         sync.RWMutex
+	clientMap     map[string]*sentry.Client
+	customDsnFlag bool
 }
 
 func NewDsnData() *DsnClientMapping {
 	return &DsnClientMapping{
-		mutex:     sync.RWMutex{},
-		clientMap: make(map[string]*sentry.Client),
+		mutex:         sync.RWMutex{},
+		clientMap:     make(map[string]*sentry.Client),
+		customDsnFlag: isTruthy(os.Getenv("SENTRY_K8S_CUSTOM_DSNS")),
 	}
 }
 
@@ -64,6 +67,14 @@ func (d *DsnClientMapping) GetClientFromMap(dsn string) (*sentry.Client, bool) {
 
 func (d *DsnClientMapping) GetClientFromObject(ctx context.Context, objectMeta *metav1.ObjectMeta, clientOptions sentry.ClientOptions) (*sentry.Client, bool) {
 
+	// If the custom DSN flag is set to false
+	// then avoid searching for the custom DSN
+	// or adding an alternative client and instead
+	// just return nil as the client
+	if !d.customDsnFlag {
+		return nil, false
+	}
+
 	// find DSN annotation from the object
 	altDsn, err := searchDsn(ctx, objectMeta)
 	if err != nil {
diff --git a/watcher_events.go b/watcher_events.go
index bbcc692..9c3824f 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -148,7 +148,7 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 		objectMeta, ok := findObjectMeta(ctx, eventObject.InvolvedObject.Kind, eventObject.InvolvedObject.Namespace, eventObject.InvolvedObject.Name)
 		if ok {
 			// if DSN annotation provided, we bind a new client with that DSN
-			client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Clone().Client().Options())
+			client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Client().Options())
 			if ok {
 				hub.BindClient(client)
 			}
diff --git a/watcher_pods.go b/watcher_pods.go
index 1951186..32f4ab6 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -118,7 +118,7 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 		hub.WithScope(func(scope *sentry.Scope) {
 
 			// If DSN annotation provided, we bind a new client with that DSN
-			client, ok := dsnData.GetClientFromObject(ctx, &podObject.ObjectMeta, hub.Clone().Client().Options())
+			client, ok := dsnData.GetClientFromObject(ctx, &podObject.ObjectMeta, hub.Client().Options())
 			if ok {
 				hub.BindClient(client)
 			}
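[PATCH 8/9] makes the whole feature opt-in via the SENTRY_K8S_CUSTOM_DSNS environment variable, evaluated once in the constructor. isTruthy is an existing helper elsewhere in this repo whose body is not shown in this diff; the stand-in below only illustrates plausible semantics and is not its actual implementation.

package main

import (
	"fmt"
	"os"
	"strconv"
	"strings"
)

// truthy is a hypothetical stand-in for the repo's isTruthy helper;
// strconv.ParseBool accepts 1/t/true/TRUE and friends.
func truthy(v string) bool {
	b, err := strconv.ParseBool(strings.TrimSpace(v))
	return err == nil && b
}

func main() {
	os.Setenv("SENTRY_K8S_CUSTOM_DSNS", "true")
	// Read once at construction time, so toggling the variable later
	// has no effect on a running operator.
	customDsnFlag := truthy(os.Getenv("SENTRY_K8S_CUSTOM_DSNS"))
	fmt.Println("custom DSNs enabled:", customDsnFlag)
}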
From 301df178210129bf55a5db31d0b183b1abbae19c Mon Sep 17 00:00:00 2001
From: Jack Zhang
Date: Wed, 13 Dec 2023 18:34:17 -0500
Subject: [PATCH 9/9] add minor changes to sentry dsn data

---
 crons.go           |  2 +-
 sentry_dsn_data.go | 35 ++++++++++++++++-------------------
 watcher_events.go  |  2 +-
 watcher_pods.go    |  3 +--
 4 files changed, 19 insertions(+), 23 deletions(-)

diff --git a/crons.go b/crons.go
index 5b4dff2..06a2986 100644
--- a/crons.go
+++ b/crons.go
@@ -103,7 +103,7 @@ func runSentryCronsCheckin(ctx context.Context, job *batchv1.Job, eventHandlerTy
 	hub.WithScope(func(scope *sentry.Scope) {
 
 		// If DSN annotation provided, we bind a new client with that DSN
-		client, ok := dsnData.GetClientFromObject(ctx, &job.ObjectMeta, hub.Client().Options())
+		client, ok := dsnClientMapping.GetClientFromObject(ctx, &job.ObjectMeta, hub.Client().Options())
 		if ok {
 			hub.BindClient(client)
 		}
diff --git a/sentry_dsn_data.go b/sentry_dsn_data.go
index f7b0e21..f944fdb 100644
--- a/sentry_dsn_data.go
+++ b/sentry_dsn_data.go
@@ -12,14 +12,16 @@ import (
 
 var DSNAnnotation = "k8s.sentry.io/dsn"
 
-// map from Sentry DSN to Client
+var dsnClientMapping = NewDsnClientMapping()
+
+// Map from Sentry DSN to Client
 type DsnClientMapping struct {
 	mutex         sync.RWMutex
 	clientMap     map[string]*sentry.Client
 	customDsnFlag bool
 }
 
-func NewDsnData() *DsnClientMapping {
+func NewDsnClientMapping() *DsnClientMapping {
 	return &DsnClientMapping{
 		mutex:         sync.RWMutex{},
 		clientMap:     make(map[string]*sentry.Client),
 		customDsnFlag: isTruthy(os.Getenv("SENTRY_K8S_CUSTOM_DSNS")),
 	}
 }
 
-// return client if added successfully
+// Return client if added successfully
 // (also returns client if already exists)
 func (d *DsnClientMapping) AddClientToMap(options sentry.ClientOptions) (*sentry.Client, error) {
 	d.mutex.Lock()
 	defer d.mutex.Unlock()
 
-	// check if we already encountered this dsn
-	existingClient, ok := d.clientMap[options.Dsn]
-	if ok {
-		return existingClient, nil
-	}
-
-	// create a new client for the dsn
+	// Create a new client for the dsn
+	// even if client already exists, it
+	// will be re-initialized with a new client
 	newClient, err := sentry.NewClient(
 		sentry.ClientOptions{
 			Dsn:              options.Dsn,
 			Debug:            true,
 			AttachStacktrace: true,
 		},
 	)
 	if err != nil {
 		return nil, err
 	}
 	d.clientMap[options.Dsn] = newClient
-
 	return newClient, nil
 }
 
-// retrieve a client with given dsn
+// Retrieve a client with given dsn
 func (d *DsnClientMapping) GetClientFromMap(dsn string) (*sentry.Client, bool) {
 	d.mutex.RLock()
 	defer d.mutex.RUnlock()
 
-	// check if we have this dsn
+	// Check if we have this dsn
 	existingClient, ok := d.clientMap[dsn]
 	return existingClient, ok
 }
@@ -75,20 +72,20 @@ func (d *DsnClientMapping) GetClientFromObject(ctx context.Context, objectMeta *
 		return nil, false
 	}
 
-	// find DSN annotation from the object
+	// Find DSN annotation from the object
 	altDsn, err := searchDsn(ctx, objectMeta)
 	if err != nil {
 		return nil, false
 	}
 
-	// if we did find an alternative DSN
+	// If we did find an alternative DSN
 	if altDsn != "" {
-		// attempt to retrieve the corresponding client
-		client, _ := dsnData.GetClientFromMap(altDsn)
+		// Attempt to retrieve the corresponding client
+		client, _ := dsnClientMapping.GetClientFromMap(altDsn)
 		if client == nil {
 			// create new client
 			clientOptions.Dsn = altDsn
-			client, err = dsnData.AddClientToMap(clientOptions)
+			client, err = dsnClientMapping.AddClientToMap(clientOptions)
 			if err != nil {
 				return nil, false
 			}
@@ -99,7 +96,7 @@ func (d *DsnClientMapping) GetClientFromObject(ctx context.Context, objectMeta *
 	}
 }
 
-// recursive function to find if there is a DSN annotation
+// Recursive function to find if there is a DSN annotation
 func searchDsn(ctx context.Context, object *metav1.ObjectMeta) (string, error) {
 
 	dsn, ok := object.Annotations[DSNAnnotation]
diff --git a/watcher_events.go b/watcher_events.go
index 9c3824f..8294265 100644
--- a/watcher_events.go
+++ b/watcher_events.go
@@ -148,7 +148,7 @@ func handleWatchEvent(ctx context.Context, event *watch.Event, cutoffTime metav1
 		objectMeta, ok := findObjectMeta(ctx, eventObject.InvolvedObject.Kind, eventObject.InvolvedObject.Namespace, eventObject.InvolvedObject.Name)
 		if ok {
 			// if DSN annotation provided, we bind a new client with that DSN
-			client, ok := dsnData.GetClientFromObject(ctx, objectMeta, hub.Client().Options())
+			client, ok := dsnClientMapping.GetClientFromObject(ctx, objectMeta, hub.Client().Options())
 			if ok {
 				hub.BindClient(client)
 			}
diff --git a/watcher_pods.go b/watcher_pods.go
index 32f4ab6..98c69f2 100644
--- a/watcher_pods.go
+++ b/watcher_pods.go
@@ -20,7 +20,6 @@ import (
 const podsWatcherName = "pods"
 
 var cronsMetaData = NewCronsMetaData()
-var dsnData = NewDsnData()
 
 func handlePodTerminationEvent(ctx context.Context, containerStatus *v1.ContainerStatus, pod *v1.Pod, scope *sentry.Scope) *sentry.Event {
 	logger := zerolog.Ctx(ctx)
@@ -118,7 +117,7 @@ func handlePodWatchEvent(ctx context.Context, event *watch.Event) {
 		hub.WithScope(func(scope *sentry.Scope) {
 
 			// If DSN annotation provided, we bind a new client with that DSN
-			client, ok := dsnData.GetClientFromObject(ctx, &podObject.ObjectMeta, hub.Client().Options())
+			client, ok := dsnClientMapping.GetClientFromObject(ctx, &podObject.ObjectMeta, hub.Client().Options())
 			if ok {
 				hub.BindClient(client)
 			}
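[PATCH 9/9] promotes the mapping to a package-level singleton (dsnClientMapping) shared by the crons, events, and pods handlers, and deliberately lets AddClientToMap rebuild a client even for a cached DSN: last write wins under the write lock, so refreshed options propagate. A condensed, compilable sketch of the final call-site shape shared by all three handlers; bindCustomClient and the injected lookup function are sketch-only names standing in for the repo's wiring.

package main

import (
	"context"

	"github.com/getsentry/sentry-go"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// bindCustomClient condenses the per-handler pattern: clone the hub, maybe
// bind an annotation-derived client, and hand the clone back via context.
func bindCustomClient(
	ctx context.Context,
	meta *metav1.ObjectMeta,
	lookup func(context.Context, *metav1.ObjectMeta, sentry.ClientOptions) (*sentry.Client, bool),
) context.Context {
	hub := sentry.GetHubFromContext(ctx)
	if hub == nil || hub.Client() == nil {
		return ctx // nothing to derive options from
	}
	hub = hub.Clone() // per-event binding stays off the shared hub
	if client, ok := lookup(ctx, meta, hub.Client().Options()); ok {
		hub.BindClient(client)
	}
	return sentry.SetHubOnContext(ctx, hub) // downstream capture uses the clone
}

func main() {
	noop := func(context.Context, *metav1.ObjectMeta, sentry.ClientOptions) (*sentry.Client, bool) {
		return nil, false // no annotation resolved: keep the default client
	}
	ctx := sentry.SetHubOnContext(context.Background(), sentry.CurrentHub())
	_ = bindCustomClient(ctx, &metav1.ObjectMeta{}, noop)
}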