Mirror of https://github.com/SamyRai/tercul-backend.git (synced 2025-12-27 02:51:34 +00:00)
This commit addresses several high-priority tasks from the TASKS.md file, including:

- **Fix Background Job Panic:** Replaced `log.Fatalf` with `log.Printf` in the `asynq` server to prevent crashes.
- **Refactor API Server Setup:** Consolidated the GraphQL Playground and Prometheus metrics endpoints into the main API server.
- **Implement `DeleteUser` Mutation:** Implemented the `DeleteUser` resolver.
- **Implement `CreateContribution` Mutation:** Implemented the `CreateContribution` resolver and its required application service.

Additionally, this commit includes a major refactoring of the configuration management system to fix a broken build. The global `config.Cfg` variable has been removed and replaced with a dependency injection approach, where the configuration object is passed to all components that require it. This change has been applied across the entire codebase, including the test suite, to ensure a stable and testable application.
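As an illustration of the dependency-injection approach described above, here is a minimal sketch in which the configuration is loaded once at startup and passed explicitly to the components that need it. The `Config` fields and the `LoadConfig` and `NewSyncJob` names below are assumptions made for the example, not the repository's actual API.

```go
// Hypothetical sketch: configuration passed explicitly instead of a global config.Cfg.
// Field and constructor names are assumptions for illustration only.
package main

import (
	"fmt"
	"os"
)

// Config stands in for the project's configuration struct.
type Config struct {
	DatabaseURL string
	RedisAddr   string
}

// LoadConfig builds the configuration once, at the composition root.
func LoadConfig() (*Config, error) {
	dbURL := os.Getenv("DATABASE_URL")
	if dbURL == "" {
		return nil, fmt.Errorf("DATABASE_URL is not set")
	}
	return &Config{
		DatabaseURL: dbURL,
		RedisAddr:   os.Getenv("REDIS_ADDR"),
	}, nil
}

// SyncJob receives its configuration via injection rather than a package-level variable.
type SyncJob struct {
	Cfg *Config
}

func NewSyncJob(cfg *Config) *SyncJob {
	return &SyncJob{Cfg: cfg}
}

func main() {
	cfg, err := LoadConfig()
	if err != nil {
		fmt.Fprintln(os.Stderr, "config error:", err)
		os.Exit(1)
	}
	job := NewSyncJob(cfg)
	fmt.Println("sync job configured with redis at", job.Cfg.RedisAddr)
}
```

Because nothing reads a package-level variable, a test can construct its own `Config` and hand it to the constructor directly, which is what makes the test-suite changes mentioned above straightforward.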
60 lines · 1.7 KiB · Go
package sync

import (
	"context"
	"fmt"
	"log"

	"tercul/internal/domain"
)

// SyncAllEdges syncs all edges by enqueueing batch jobs.
func (s *SyncJob) SyncAllEdges(ctx context.Context) error {
	log.Println("Enqueueing edge sync jobs...")

	var count int64
	if err := s.DB.Model(&domain.Edge{}).Count(&count).Error; err != nil {
		return fmt.Errorf("error counting edges: %w", err)
	}

	batchSize := 100
	for offset := 0; offset < int(count); offset += batchSize {
		if err := EnqueueEdgeSync(s.AsynqClient, batchSize, offset); err != nil {
			log.Printf("Error enqueueing edge sync job (offset %d): %v", offset, err)
		} else {
			log.Printf("Enqueued edge sync job (offset %d, batch size %d)", offset, batchSize)
		}
	}

	log.Println("Edge sync jobs enqueued successfully.")
	return nil
}

// SyncEdgesBatch syncs a batch of edges.
func (s *SyncJob) SyncEdgesBatch(ctx context.Context, batchSize, offset int) error {
	log.Printf("Syncing edges batch (offset %d, batch size %d)...", offset, batchSize)

	var edges []domain.Edge
	if err := s.DB.Limit(batchSize).Offset(offset).Find(&edges).Error; err != nil {
		return fmt.Errorf("error fetching edges batch: %w", err)
	}

	// Convert edges to map format for batch processing.
	var edgeMaps []map[string]interface{}
	for _, edge := range edges {
		edgeMap := map[string]interface{}{
			"id":          edge.ID,
			"sourceTable": edge.SourceTable,
			"sourceID":    edge.SourceID,
			"targetTable": edge.TargetTable,
			"targetID":    edge.TargetID,
			"relation":    edge.Relation,
			"language":    edge.Language,
			"extra":       edge.Extra,
		}
		edgeMaps = append(edgeMaps, edgeMap)
	}

	batchProcessor := NewBatchProcessor(s.DB, s.Cfg)
	return batchProcessor.CreateObjectsBatch(ctx, "Edge", edgeMaps)
}
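For context, the batch jobs enqueued by `SyncAllEdges` are presumably consumed by an `asynq` handler that decodes the batch size and offset from the task payload and delegates to `SyncEdgesBatch`. The sketch below shows one plausible wiring; the task type name `"sync:edges"`, the payload layout, and the handler registration are assumptions, not taken from the repository.

```go
// Hypothetical consumer side for the enqueued edge-sync jobs.
// Task type name and payload layout are assumptions for illustration.
package sync

import (
	"context"
	"encoding/json"

	"github.com/hibiken/asynq"
)

const TypeEdgeSync = "sync:edges" // assumed task type name

type edgeSyncPayload struct {
	BatchSize int `json:"batch_size"`
	Offset    int `json:"offset"`
}

// HandleEdgeSyncTask decodes the payload and delegates to SyncEdgesBatch.
func (s *SyncJob) HandleEdgeSyncTask(ctx context.Context, t *asynq.Task) error {
	var p edgeSyncPayload
	if err := json.Unmarshal(t.Payload(), &p); err != nil {
		return err
	}
	return s.SyncEdgesBatch(ctx, p.BatchSize, p.Offset)
}

// RegisterHandlers wires the handler into an asynq.ServeMux.
func (s *SyncJob) RegisterHandlers(mux *asynq.ServeMux) {
	mux.HandleFunc(TypeEdgeSync, s.HandleEdgeSyncTask)
}
```

On the producer side, `EnqueueEdgeSync` would then create a task of the same type with a matching payload and hand it to the shared `asynq.Client`.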