
Commit

Implement separate readiness check
weeco committed Apr 22, 2019
1 parent 727e8c4 commit dcc0490
Showing 1 changed file with 17 additions and 3 deletions.
main.go: 20 changes (17 additions & 3 deletions)
@@ -41,7 +41,7 @@ func main() {
 	clusterCh := make(chan *kafka.StorageRequest, 200)
 
 	// Create storage module
-	cache := storage.NewOffsetStorage(consumerOffsetsCh, clusterCh)
+	cache := storage.NewMemoryStorage(consumerOffsetsCh, clusterCh)
 	cache.Start()
 
 	// Create cluster module
@@ -58,13 +58,14 @@ func main() {
 
 	// Start listening on /metrics endpoint
 	http.Handle("/metrics", promhttp.Handler())
-	http.Handle("/healthcheck", healthcheck(cluster))
+	http.Handle("/healthcheck", healthCheck(cluster))
+	http.Handle("/readycheck", readyCheck(cache))
 	listenAddress := net.JoinHostPort(opts.TelemetryHost, strconv.Itoa(opts.TelemetryPort))
 	log.Infof("Listening on: '%s", listenAddress)
 	log.Fatal(http.ListenAndServe(listenAddress, nil))
 }
 
-func healthcheck(cluster *kafka.Cluster) http.HandlerFunc {
+func healthCheck(cluster *kafka.Cluster) http.HandlerFunc {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		if cluster.IsHealthy() {
 			w.Write([]byte("Healthy"))
@@ -73,3 +73,16 @@ func healthcheck(cluster *kafka.Cluster) http.HandlerFunc {
 		}
 	})
 }
+
+// readyCheck only returns 200 once the __consumer_offsets topic has initially been consumed.
+// Using this readiness check you can slow down rolling updates until a pod is ready
+// to expose consumer group metrics that are up to date.
+func readyCheck(storage *storage.MemoryStorage) http.HandlerFunc {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		if storage.IsConsumed() {
+			w.Write([]byte("Ready"))
+		} else {
+			http.Error(w, "Offsets topic has not been consumed yet", http.StatusServiceUnavailable)
+		}
+	})
+}
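
For context, whether /readycheck answers 200 depends entirely on storage.IsConsumed(), which this commit does not show. The sketch below is only one plausible way a MemoryStorage could track that state, assuming it counts __consumer_offsets partitions that have been read up to the high water mark observed at startup; apart from the IsConsumed() name, everything here (the simplified constructor, the counters, MarkPartitionCaughtUp) is a hypothetical illustration, not the project's actual API.

// Hypothetical sketch (not taken from this repository): one way a
// MemoryStorage could decide when IsConsumed() should report true.
// The constructor is simplified; the real one takes the
// consumerOffsetsCh and clusterCh channels shown in the diff above.
package storage

import "sync"

type MemoryStorage struct {
	mu                 sync.RWMutex
	partitionCount     int // number of __consumer_offsets partitions
	partitionsCaughtUp int // partitions read up to their startup high water mark
}

func NewMemoryStorage(partitionCount int) *MemoryStorage {
	return &MemoryStorage{partitionCount: partitionCount}
}

// MarkPartitionCaughtUp would be called by the offsets consumer once a
// partition has been read up to the high water mark seen at startup.
func (s *MemoryStorage) MarkPartitionCaughtUp() {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.partitionsCaughtUp < s.partitionCount {
		s.partitionsCaughtUp++
	}
}

// IsConsumed reports whether every partition has been consumed once,
// i.e. the exposed consumer group metrics are up to date.
func (s *MemoryStorage) IsConsumed() bool {
	s.mu.RLock()
	defer s.mu.RUnlock()
	return s.partitionsCaughtUp == s.partitionCount
}

In a Kubernetes deployment the intended consumer of this endpoint would be a readiness probe doing an HTTP GET against /readycheck on the telemetry port, so a rolling update only proceeds once a new pod reports up-to-date consumer group metrics, while /healthcheck remains suitable as a liveness probe.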
