Mirror of https://github.com/SamyRai/tercul-backend.git, synced 2025-12-27 05:11:34 +00:00
This commit addresses all the high-priority tasks outlined in the TASKS.md file, significantly improving the application's observability, completing key features, and refactoring critical parts of the codebase.

### Observability

- **Centralized Logging:** Implemented a new structured, context-aware logging system using `zerolog`. A new logging middleware injects request-specific information (request ID, user ID, trace ID) into the logger, and all application logging has been refactored to use this new system (see the middleware sketch below).
- **Prometheus Metrics:** Added Prometheus metrics for database query performance by creating a GORM plugin that automatically records query latency and totals (see the plugin sketch below).
- **OpenTelemetry Tracing:** Fully instrumented all application services in `internal/app` and data repositories in `internal/data/sql` with OpenTelemetry tracing, providing deep visibility into application performance (see the tracing sketch below).

### Features

- **Analytics:** Implemented like, comment, and bookmark counting. The respective command handlers now call the analytics service to increment counters when these actions are performed (see the handler sketch below).
- **Enrichment Tool:** Built a new, extensible `enrich` command-line tool to fetch data from external sources. The initial implementation enriches author data using the Open Library API (see the CLI sketch below).

### Refactoring & Fixes

- **Decoupled Testing:** Refactored the testing utilities in `internal/testutil` to be database-agnostic, promoting the use of mock-based unit tests and improving test speed and reliability.
- **Build Fixes:** Resolved numerous build errors, including a critical import cycle between the logging, observability, and authentication packages.
- **Search Service:** Fixed the search service integration by implementing the `GetWorkContent` method in the localization service, allowing the search indexer to correctly fetch and index work content.
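The code sketches that follow are illustrative only: they show the general shape of the patterns named above, not the actual tercul-backend implementation, and every type, function, metric, and field name that is not mentioned in the list above is an assumption.

First, a minimal sketch of a context-aware logging middleware built directly on `zerolog`. The project's real middleware sits behind its own `internal/platform/log` helpers (such as `log.FromContext`, used in the file below) and also injects user and trace IDs; here only a request ID and basic request fields are bound, and the `X-Request-ID` fallback is hypothetical.

```go
package main

import (
	"net/http"
	"os"
	"strconv"
	"time"

	"github.com/rs/zerolog"
)

// LoggingMiddleware attaches a request-scoped zerolog logger to the request
// context. Downstream handlers retrieve it with zerolog.Ctx(r.Context()).
func LoggingMiddleware(base zerolog.Logger, next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Reuse an upstream request ID if present, otherwise mint a cheap one.
		reqID := r.Header.Get("X-Request-ID")
		if reqID == "" {
			reqID = strconv.FormatInt(time.Now().UnixNano(), 36)
		}

		// Bind request-specific fields to a child logger and store it in the context.
		logger := base.With().
			Str("request_id", reqID).
			Str("method", r.Method).
			Str("path", r.URL.Path).
			Logger()
		ctx := logger.WithContext(r.Context())

		logger.Info().Msg("request started")
		next.ServeHTTP(w, r.WithContext(ctx))
	})
}

func main() {
	base := zerolog.New(os.Stdout).With().Timestamp().Logger()

	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Any handler can pull the request-scoped logger back out of the context.
		zerolog.Ctx(r.Context()).Info().Msg("handling request")
		w.Write([]byte("ok"))
	})

	http.ListenAndServe(":8080", LoggingMiddleware(base, handler))
}
```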
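Next, a sketch of a GORM plugin that records query latency in a Prometheus histogram, assuming the GORM v2 callback API and the official Prometheus client. The metric name, the label, and the restriction to the `Query` callback are assumptions; the plugin described above also records query totals and presumably covers create, update, and delete as well.

```go
package metrics

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"gorm.io/gorm"
)

// queryDuration is a hypothetical histogram for SQL query latency,
// labelled by the GORM operation.
var queryDuration = prometheus.NewHistogramVec(
	prometheus.HistogramOpts{
		Name: "db_query_duration_seconds",
		Help: "Duration of database queries.",
	},
	[]string{"operation"},
)

func init() {
	prometheus.MustRegister(queryDuration)
}

// MetricsPlugin implements gorm.Plugin and records query latency.
type MetricsPlugin struct{}

func (MetricsPlugin) Name() string { return "prometheus_metrics" }

func (MetricsPlugin) Initialize(db *gorm.DB) error {
	// Record the start time before GORM runs the query...
	before := func(db *gorm.DB) {
		db.InstanceSet("metrics:start", time.Now())
	}
	// ...and observe the elapsed time after it finishes.
	after := func(op string) func(*gorm.DB) {
		return func(db *gorm.DB) {
			if v, ok := db.InstanceGet("metrics:start"); ok {
				if start, ok := v.(time.Time); ok {
					queryDuration.WithLabelValues(op).Observe(time.Since(start).Seconds())
				}
			}
		}
	}

	if err := db.Callback().Query().Before("gorm:query").Register("metrics:before_query", before); err != nil {
		return err
	}
	return db.Callback().Query().After("gorm:query").Register("metrics:after_query", after("query"))
}
```

Such a plugin would be attached once at startup with `db.Use(MetricsPlugin{})`.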
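The OpenTelemetry instrumentation follows a standard pattern in each service and repository method: start a span from the incoming context, annotate it, and end it before returning. `WorkService`, `WorkRepository`, and the span and attribute names below are hypothetical stand-ins.

```go
package app

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
)

// tracer is the package-level tracer; the instrumentation name is illustrative.
var tracer = otel.Tracer("tercul/internal/app")

// WorkService and WorkRepository are hypothetical stand-ins for the real services.
type WorkService struct{ repo WorkRepository }

type WorkRepository interface {
	FindByID(ctx context.Context, id string) (string, error)
}

// GetWork shows the pattern: start a span, annotate it, and always end it.
func (s *WorkService) GetWork(ctx context.Context, id string) (string, error) {
	ctx, span := tracer.Start(ctx, "WorkService.GetWork")
	defer span.End()

	span.SetAttributes(attribute.String("work.id", id))

	// The context carrying the span is passed down so repository spans nest under this one.
	return s.repo.FindByID(ctx, id)
}
```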
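For the analytics counting, a sketch of a command handler bumping a counter after a successful write. All names here (`LikeWorkHandler`, `AnalyticsService`, `IncrementLikeCount`) are hypothetical, and treating the increment as best-effort is a choice made for this sketch, not necessarily what the commit does.

```go
package commands

import "context"

// AnalyticsService is a hypothetical interface for the counters described above.
type AnalyticsService interface {
	IncrementLikeCount(ctx context.Context, workID string) error
}

// LikeRepository is a hypothetical persistence interface for likes.
type LikeRepository interface {
	Create(ctx context.Context, userID, workID string) error
}

// LikeWorkHandler persists the like and then bumps the analytics counter.
type LikeWorkHandler struct {
	likes     LikeRepository
	analytics AnalyticsService
}

func (h *LikeWorkHandler) Handle(ctx context.Context, userID, workID string) error {
	if err := h.likes.Create(ctx, userID, workID); err != nil {
		return err
	}
	// In this sketch the counter is best-effort: a failed increment does not fail the command.
	_ = h.analytics.IncrementLikeCount(ctx, workID)
	return nil
}
```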
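Finally, a sketch of the enrichment idea as a small CLI that queries the public Open Library author-search endpoint (`https://openlibrary.org/search/authors.json`). The real `enrich` tool is described as extensible, so this flat `main` is only illustrative, and the JSON fields modelled here (`name`, `birth_date`, `top_work`) are a small subset of what the API returns.

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
	"os"
)

// authorSearchResponse models the handful of fields read from the
// Open Library author search endpoint.
type authorSearchResponse struct {
	Docs []struct {
		Name      string `json:"name"`
		BirthDate string `json:"birth_date"`
		TopWork   string `json:"top_work"`
	} `json:"docs"`
}

func main() {
	if len(os.Args) < 2 {
		fmt.Fprintln(os.Stderr, "usage: enrich <author name>")
		os.Exit(1)
	}

	endpoint := "https://openlibrary.org/search/authors.json?q=" + url.QueryEscape(os.Args[1])
	resp, err := http.Get(endpoint)
	if err != nil {
		fmt.Fprintln(os.Stderr, "request failed:", err)
		os.Exit(1)
	}
	defer resp.Body.Close()

	var result authorSearchResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		fmt.Fprintln(os.Stderr, "decode failed:", err)
		os.Exit(1)
	}

	for _, doc := range result.Docs {
		fmt.Printf("%s (born %s), best known for %q\n", doc.Name, doc.BirthDate, doc.TopWork)
	}
}
```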
101 lines
2.6 KiB
Go
package http

import (
	"net/http"
	"sync"
	"time"

	"tercul/internal/platform/config"
	"tercul/internal/platform/log"
)

// Canonical token bucket implementation for strict burst/rate enforcement.
// Each client has a bucket with up to 'capacity' tokens, refilled at 'rate' tokens/sec.
// On each request, refill tokens based on elapsed time, allow only if tokens >= 1.

type RateLimiter struct {
	tokens     map[string]float64   // tokens per client
	lastRefill map[string]time.Time // last refill time per client
	rate       float64              // tokens per second
	capacity   float64              // maximum tokens
	mu         sync.Mutex           // mutex for concurrent access
}

// NewRateLimiter creates a new rate limiter.
func NewRateLimiter(rate, capacity int) *RateLimiter {
	if rate <= 0 {
		rate = 10 // default rate: 10 requests per second
	}
	if capacity <= 0 {
		capacity = 100 // default capacity: 100 tokens
	}
	return &RateLimiter{
		tokens:     make(map[string]float64),
		lastRefill: make(map[string]time.Time),
		rate:       float64(rate),
		capacity:   float64(capacity),
	}
}

// Allow checks if a request is allowed based on the client's IP.
func (rl *RateLimiter) Allow(clientIP string) bool {
	rl.mu.Lock()
	defer rl.mu.Unlock()

	now := time.Now()

	// Initialize bucket for new client
	if _, exists := rl.tokens[clientIP]; !exists {
		rl.tokens[clientIP] = rl.capacity
		rl.lastRefill[clientIP] = now
	}

	// Refill tokens based on elapsed time
	elapsed := now.Sub(rl.lastRefill[clientIP]).Seconds()
	refill := elapsed * rl.rate
	if refill > 0 {
		rl.tokens[clientIP] = minF(rl.capacity, rl.tokens[clientIP]+refill)
		rl.lastRefill[clientIP] = now
	}

	if rl.tokens[clientIP] >= 1 {
		rl.tokens[clientIP] -= 1
		return true
	}
	return false
}

// minF returns the minimum of two float64s.
func minF(a, b float64) float64 {
	if a < b {
		return a
	}
	return b
}

// RateLimitMiddleware creates a middleware that applies rate limiting.
func RateLimitMiddleware(next http.Handler) http.Handler {
	rateLimiter := NewRateLimiter(config.Cfg.RateLimit, config.Cfg.RateLimitBurst)

	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// Use X-Client-ID header for client identification in tests
		clientID := r.Header.Get("X-Client-ID")
		if clientID == "" {
			clientID = r.RemoteAddr
		}

		// Check if request is allowed
		if !rateLimiter.Allow(clientID) {
			log.FromContext(r.Context()).
				With("clientID", clientID).
				Warn("Rate limit exceeded")

			w.WriteHeader(http.StatusTooManyRequests)
			w.Write([]byte("Rate limit exceeded. Please try again later."))
			return
		}

		// Continue to the next handler
		next.ServeHTTP(w, r)
	})
}
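A brief sketch of how the token-bucket semantics above could be exercised in a test within the same package: a limiter with rate 1 and capacity 2 should absorb a burst of two requests, reject the third, and admit another after roughly a second. The test name and timings are illustrative.

```go
package http

import (
	"testing"
	"time"
)

// TestRateLimiterBurst exercises the burst and refill behaviour of RateLimiter.
func TestRateLimiterBurst(t *testing.T) {
	rl := NewRateLimiter(1, 2) // 1 token/sec, burst of 2

	if !rl.Allow("client-a") || !rl.Allow("client-a") {
		t.Fatal("expected the first two requests to be allowed")
	}
	if rl.Allow("client-a") {
		t.Fatal("expected the third immediate request to be rejected")
	}

	time.Sleep(1100 * time.Millisecond) // wait for roughly one token to refill
	if !rl.Allow("client-a") {
		t.Fatal("expected a request to be allowed after refill")
	}
}
```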