package testutils

import (
	"context"
	"crypto/sha256"
	"database/sql"
	"encoding/hex"
	"fmt"
	"io"
	"os"
	"strings"
	"syscall"
	"testing"
	"time"

	"bugulma/backend/internal/domain"

	_ "github.com/jackc/pgx/v5/stdlib"
	"github.com/peterldowns/pgtestdb"
	"github.com/testcontainers/testcontainers-go"
	tcpostgres "github.com/testcontainers/testcontainers-go/modules/postgres"
	"github.com/testcontainers/testcontainers-go/wait"
	"gorm.io/driver/postgres"
	"gorm.io/gorm"
	"gorm.io/gorm/logger"
)

// GormMigrator implements the pgtestdb.Migrator interface for GORM migrations.
// It ensures PostGIS is properly set up before running migrations.
type GormMigrator struct{}

// Hash returns a unique identifier for the migration state.
// pgtestdb uses it to decide whether an existing template database can be reused.
// The hash is derived from the migration file names and modification times.
func (m *GormMigrator) Hash() (string, error) {
	// Scan migration directories for files
	migrationDirs := []string{
		"migrations/postgres",
		"migrations/neo4j",
		"migrations/init",
	}

	var migrationData []byte

	// For each migration directory, collect file names and modification times.
	// os.ReadDir returns entries sorted by filename, so the ordering is deterministic.
	for _, dir := range migrationDirs {
		files, err := os.ReadDir(dir)
		if err != nil {
			// If the directory doesn't exist, skip it (not an error)
			continue
		}

		for _, file := range files {
			if !file.IsDir() && (strings.HasSuffix(file.Name(), ".sql") || strings.HasSuffix(file.Name(), ".cypher")) {
				info, err := file.Info()
				if err != nil {
					continue
				}
				// Include the filename and modification time in the hash data
				migrationData = append(migrationData, []byte(fmt.Sprintf("%s:%d:", file.Name(), info.ModTime().Unix()))...)
			}
		}
	}

	// Also include a marker for domain struct changes that affect migrations.
	// Bump this string whenever the domain models change so the hash changes too.
	migrationData = append(migrationData, []byte("domain-models-v1")...)

	hash := sha256.Sum256(migrationData)
	return hex.EncodeToString(hash[:]), nil
}
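// The modification-time based hash above is cheap, but it changes on every fresh
// checkout even when file contents are identical, which defeats template reuse
// across machines. A content-based variant is sketched below as an illustration;
// hashMigrationFileContents is an assumed name and is not wired into
// GormMigrator.Hash.
func hashMigrationFileContents(dirs []string) (string, error) {
	h := sha256.New()
	for _, dir := range dirs {
		entries, err := os.ReadDir(dir)
		if err != nil {
			continue // missing directories are skipped, mirroring Hash above
		}
		for _, e := range entries {
			if e.IsDir() || !(strings.HasSuffix(e.Name(), ".sql") || strings.HasSuffix(e.Name(), ".cypher")) {
				continue
			}
			data, err := os.ReadFile(dir + "/" + e.Name())
			if err != nil {
				return "", fmt.Errorf("failed to read migration %s: %w", e.Name(), err)
			}
			// Hash the file name and its contents so the result is stable across
			// checkouts but changes whenever a migration changes.
			h.Write([]byte(e.Name()))
			h.Write(data)
		}
	}
	return hex.EncodeToString(h.Sum(nil)), nil
}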
// Migrate runs all GORM migrations, including PostGIS setup.
// pgtestdb calls this function when creating template databases.
// The PostGIS extension MUST be created before any migrations that use geometry types.
func (m *GormMigrator) Migrate(ctx context.Context, db *sql.DB, config pgtestdb.Config) error {
	// Step 1: Try to enable the PostGIS extension FIRST (before any migrations).
	// This must be done on the raw *sql.DB connection, not through GORM, to ensure it persists.
	// If PostGIS cannot be enabled (e.g., permission issues), the PostGIS migrations are skipped.
	fmt.Printf("Attempting to enable PostGIS extension...\n")
	postgisEnabled := false
	if err := enablePostGISExtension(db); err != nil {
		// PostGIS extension creation can fail when:
		// - the user doesn't have permission to create extensions
		// - PostGIS is not installed in PostgreSQL
		// - template database restrictions apply
		// We continue without PostGIS; tests that need it will fail with clear errors.
		fmt.Printf("PostGIS extension failed: %v\n", err)
		postgisEnabled = false
	} else {
		fmt.Printf("PostGIS extension enabled successfully\n")
		postgisEnabled = true
	}

	// Step 2: Create the GORM connection
	gormDB, err := gorm.Open(postgres.New(postgres.Config{
		Conn: db,
	}), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		return fmt.Errorf("failed to create GORM connection: %w", err)
	}

	// Step 3: Clean up any leftover geometry columns from previous runs
	if err := cleanupGeometryColumns(gormDB); err != nil {
		fmt.Printf("Warning: failed to cleanup geometry columns before migration: %v\n", err)
		// Don't fail - continue with migration
	}

	// Step 4: Run domain migrations (creates tables, but not PostGIS columns)
	if err := domain.AutoMigrate(gormDB); err != nil {
		return fmt.Errorf("failed to run domain migrations: %w", err)
	}

	// Step 5: Run PostGIS-specific migrations (creates geometry columns and indexes).
	// This is attempted whether or not PostGIS is enabled; tests that need PostGIS
	// will fail with appropriate error messages if it is unavailable.
	fmt.Printf("Running PostGIS migrations...\n")
	if err := domain.RunPostGISMigrations(gormDB); err != nil {
		// PostGIS migrations failed or were skipped - this is OK for test environments.
		// The service should handle missing PostGIS gracefully.
		fmt.Printf("PostGIS migrations result: %v (this is OK for test environments)\n", err)
	} else {
		fmt.Printf("PostGIS migrations completed successfully\n")
	}

	// Step 6: Create additional indexes
	if err := domain.CreateIndexes(gormDB); err != nil {
		// Index creation errors are non-fatal (indexes might already exist);
		// ignore them so migrations stay idempotent.
		_ = err
	}

	// Step 7: Run geographical feature migrations manually.
	// Geographical features are normally created via golang-migrate, so they are applied here by hand.
	if postgisEnabled {
		if err := runGeographicalFeatureMigrations(gormDB); err != nil {
			// Geographical migrations matter for geo tests but are not critical for core functionality;
			// ignore the error - tests that need geographical features will fail appropriately.
			_ = err
		}
	}

	return nil
}
// runGeographicalFeatureMigrations creates the geographical feature table, geometry columns, and indexes.
func runGeographicalFeatureMigrations(db *gorm.DB) error {
	// Create geographical_features table
	createTableSQL := `
	CREATE TABLE IF NOT EXISTS geographical_features (
		id TEXT PRIMARY KEY,
		name TEXT,
		feature_type VARCHAR(50) NOT NULL,
		osm_type VARCHAR(50),
		osm_id VARCHAR(50),
		properties JSONB DEFAULT '{}'::jsonb,
		processing_version VARCHAR(20) DEFAULT '1.0',
		quality_score DOUBLE PRECISION DEFAULT 0.0,
		source VARCHAR(100) DEFAULT 'osm',
		created_at TIMESTAMP WITH TIME ZONE DEFAULT NOW(),
		updated_at TIMESTAMP WITH TIME ZONE DEFAULT NOW()
	);
	`
	if err := db.Exec(createTableSQL).Error; err != nil {
		return fmt.Errorf("failed to create geographical_features table: %w", err)
	}

	// Add geometry column
	addGeometrySQL := `
	DO $$
	BEGIN
		IF NOT EXISTS (
			SELECT 1 FROM information_schema.columns
			WHERE table_name = 'geographical_features' AND column_name = 'geometry'
		) THEN
			ALTER TABLE geographical_features ADD COLUMN geometry GEOMETRY(Geometry, 4326);
		END IF;
	END $$;
	`
	if err := db.Exec(addGeometrySQL).Error; err != nil {
		return fmt.Errorf("failed to add geometry column: %w", err)
	}

	// Create indexes
	indexSQLs := []string{
		`CREATE INDEX IF NOT EXISTS idx_geographical_features_geometry ON geographical_features USING GIST (geometry)`,
		`CREATE INDEX IF NOT EXISTS idx_geographical_features_type ON geographical_features (feature_type)`,
		`CREATE INDEX IF NOT EXISTS idx_geographical_features_osm_id ON geographical_features (osm_type, osm_id)`,
		`CREATE INDEX IF NOT EXISTS idx_geographical_features_properties ON geographical_features USING GIN (properties)`,
		`CREATE INDEX IF NOT EXISTS idx_geographical_features_created_at ON geographical_features (created_at)`,
	}

	for _, stmt := range indexSQLs {
		if err := db.Exec(stmt).Error; err != nil {
			return fmt.Errorf("failed to create index: %w", err)
		}
	}

	// Add site footprint geometry column
	addFootprintSQL := `
	DO $$
	BEGIN
		IF NOT EXISTS (
			SELECT 1 FROM information_schema.columns
			WHERE table_name = 'sites' AND column_name = 'footprint_geometry'
		) THEN
			ALTER TABLE sites ADD COLUMN footprint_geometry geometry(Polygon, 4326);
			CREATE INDEX IF NOT EXISTS idx_sites_footprint_geometry ON sites USING GIST (footprint_geometry);
		END IF;
	END $$;
	`
	if err := db.Exec(addFootprintSQL).Error; err != nil {
		return fmt.Errorf("failed to add footprint geometry column: %w", err)
	}

	return nil
}
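// Illustration only: how a test might exercise the geographical_features table
// created above. ST_GeomFromText is a standard PostGIS function; the function
// name exampleInsertFeature, the coordinates, and the feature values are
// assumptions and are not referenced elsewhere in the package.
func exampleInsertFeature(db *gorm.DB) error {
	return db.Exec(
		`INSERT INTO geographical_features (id, name, feature_type, geometry)
		 VALUES (?, ?, ?, ST_GeomFromText('POINT(52.80 54.53)', 4326))`, // longitude latitude order
		"feature-1", "example point", "poi",
	).Error
}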
// enablePostGISExtension enables the PostGIS extension in the database.
// This must be done on the raw *sql.DB connection so that it persists.
// The PostGIS extension is required for spatial operations and geometry types,
// and it must be created BEFORE any migrations that use geometry types.
func enablePostGISExtension(db *sql.DB) error {
	fmt.Printf("Attempting to create PostGIS extension...\n")

	// Check the current database name and user
	var dbName, currentUser string
	if err := db.QueryRow("SELECT current_database(), current_user").Scan(&dbName, &currentUser); err != nil {
		fmt.Printf("Warning: Could not get database info: %v\n", err)
	} else {
		fmt.Printf("Creating PostGIS extension in database: %s as user: %s\n", dbName, currentUser)
	}

	// Check if the user has superuser privileges
	var isSuper bool
	if err := db.QueryRow("SELECT usesuper FROM pg_user WHERE usename = $1", currentUser).Scan(&isSuper); err != nil {
		fmt.Printf("Warning: Could not check superuser status: %v\n", err)
	} else {
		fmt.Printf("User %s superuser status: %t\n", currentUser, isSuper)
	}

	// Try to create the extension with different approaches
	var err error

	// First try: standard approach
	if _, err = db.Exec("CREATE EXTENSION IF NOT EXISTS postgis"); err != nil {
		fmt.Printf("Standard extension creation failed: %v\n", err)

		// Second try: specify the schema explicitly
		if _, err = db.Exec("CREATE EXTENSION IF NOT EXISTS postgis SCHEMA public"); err != nil {
			fmt.Printf("Schema-specific extension creation failed: %v\n", err)

			// Third try: check whether the extension already exists
			var exists bool
			if checkErr := db.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'postgis')").Scan(&exists); checkErr != nil {
				fmt.Printf("Failed to check if extension exists: %v\n", checkErr)
			} else if exists {
				fmt.Printf("PostGIS extension already exists, proceeding...\n")
				err = nil // Reset the error since the extension exists
			} else {
				fmt.Printf("PostGIS extension does not exist and cannot be created\n")
			}
		}
	}

	if err != nil {
		fmt.Printf("ERROR: All attempts to create/enable PostGIS extension failed: %v\n", err)
		return fmt.Errorf("failed to create PostGIS extension: %w", err)
	}

	fmt.Printf("PostGIS extension creation/setup completed\n")

	// Verify PostGIS is properly initialized by calling a PostGIS function
	var version string
	if err := db.QueryRow("SELECT PostGIS_Version()").Scan(&version); err != nil {
		fmt.Printf("ERROR: PostGIS extension exists but functions not available: %v\n", err)
		return fmt.Errorf("PostGIS extension exists but functions are not available: %w", err)
	}

	fmt.Printf("SUCCESS: PostGIS enabled successfully, version: %s\n", version)
	return nil
}

// getEnv returns the value of an environment variable, or the default when unset.
func getEnv(key, defaultValue string) string {
	if value := os.Getenv(key); value != "" {
		return value
	}
	return defaultValue
}
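// The comments above repeatedly note that "tests that need PostGIS will fail
// with clear errors". A small guard like the one below makes that explicit by
// skipping instead of failing when the extension is unavailable. This is a
// sketch; the name RequirePostGIS is an assumption and nothing else in the
// package calls it.
func RequirePostGIS(t testing.TB, db *sql.DB) {
	t.Helper()
	var enabled bool
	if err := db.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'postgis')").Scan(&enabled); err != nil {
		t.Skipf("skipping: could not determine PostGIS availability: %v", err)
	}
	if !enabled {
		t.Skip("skipping: PostGIS extension is not installed in the test database")
	}
}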
// ginkgoTBWrapper wraps Ginkgo's FullGinkgoTInterface so it can be used where a
// testing.TB-style value is expected, for example by testcontainers helpers.
type ginkgoTBWrapper struct {
	t interface {
		Helper()
		Logf(format string, args ...interface{})
		Fatalf(format string, args ...interface{})
		Failed() bool
	}
	cleanups []func()
}

func (w *ginkgoTBWrapper) Helper() { w.t.Helper() }

func (w *ginkgoTBWrapper) Log(args ...interface{}) { w.t.Logf("%s", fmt.Sprint(args...)) }

func (w *ginkgoTBWrapper) Logf(format string, args ...interface{}) { w.t.Logf(format, args...) }

func (w *ginkgoTBWrapper) Fatalf(format string, args ...interface{}) { w.t.Fatalf(format, args...) }

func (w *ginkgoTBWrapper) Failed() bool { return w.t.Failed() }

func (w *ginkgoTBWrapper) Cleanup(fn func()) { w.cleanups = append(w.cleanups, fn) }

// Errorf is mapped to Fatalf so that any reported error stops the test immediately.
func (w *ginkgoTBWrapper) Errorf(format string, args ...interface{}) { w.t.Fatalf(format, args...) }

func (w *ginkgoTBWrapper) Skip(args ...interface{}) { w.t.Logf("SKIP: %s", fmt.Sprint(args...)) }

func (w *ginkgoTBWrapper) Skipf(format string, args ...interface{}) { w.t.Logf("SKIP: "+format, args...) }

func (w *ginkgoTBWrapper) SkipNow() { w.t.Logf("SKIP: test skipped") }

func (w *ginkgoTBWrapper) TempDir() string {
	// testcontainers does not need a temp dir here, so return an empty string
	return ""
}

func (w *ginkgoTBWrapper) Name() string { return "GinkgoTest" }

func (w *ginkgoTBWrapper) Setenv(key, value string) {
	// Not needed for our use case
}

func (w *ginkgoTBWrapper) Parallel() {
	// Not needed for our use case
}

func (w *ginkgoTBWrapper) Run(name string, f func(t *testing.T)) bool {
	// Not needed for our use case
	return true
}

func (w *ginkgoTBWrapper) Deadline() (deadline time.Time, ok bool) {
	return time.Time{}, false
}

// Additional testing.TB methods

func (w *ginkgoTBWrapper) Attr(key, value string) {
	// Not needed for our use case
}

func (w *ginkgoTBWrapper) Error(args ...interface{}) { w.t.Fatalf("%s", fmt.Sprint(args...)) }

func (w *ginkgoTBWrapper) Fatal(args ...interface{}) { w.t.Fatalf("%s", fmt.Sprint(args...)) }

func (w *ginkgoTBWrapper) Fail() { w.t.Fatalf("test failed") }

func (w *ginkgoTBWrapper) FailNow() { w.t.Fatalf("test failed") }

func (w *ginkgoTBWrapper) Chdir(dir string) {
	// Not needed for our use case
}

func (w *ginkgoTBWrapper) Skipped() bool { return false }

func (w *ginkgoTBWrapper) Context() context.Context { return context.Background() }

func (w *ginkgoTBWrapper) Output() io.Writer {
	// Return a no-op writer
	return &dummyWriter{}
}

// dummyWriter implements io.Writer and discards everything written to it.
type dummyWriter struct{}

func (d *dummyWriter) Write(p []byte) (n int, err error) { return len(p), nil }

// SetupTestDB creates an isolated PostgreSQL database for testing using pgtestdb.
// DEPRECATED: Use SetupTestDBWithTestcontainers() for better isolation and no local PostgreSQL requirement.
// Each test gets its own temporary database with migrations already applied.
// This function accepts the testing.TB interface, so it is compatible with *testing.T.
//
// Example usage:
//
//	func TestMyFeature(t *testing.T) {
//		db := testutils.SetupTestDB(t)
//		repo := repository.NewMyRepository(db)
//		// Your test code here
//	}
func SetupTestDB(t testing.TB) *gorm.DB {
	return setupTestDBWithTB(t)
}

// SetupTestDBForGinkgo creates an isolated PostgreSQL database for Ginkgo tests.
// DEPRECATED: Use SetupTestDBWithTestcontainers() for better isolation and no local PostgreSQL requirement.
// Use this function in Ginkgo BeforeEach blocks.
//
// Example usage:
//
//	BeforeEach(func() {
//		db = testutils.SetupTestDBForGinkgo(GinkgoT())
//		repo = repository.NewMyRepository(db)
//	})
func SetupTestDBForGinkgo(ginkgoT interface {
	Helper()
	Logf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Failed() bool
}) *gorm.DB {
	wrapper := &ginkgoTBWrapper{t: ginkgoT}
	return setupTestDBWithTB(wrapper)
}
// setupTestDBWithTB is the internal implementation shared by the standard
// testing.T path and Ginkgo's testing interface.
func setupTestDBWithTB(t interface {
	Helper()
	Logf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Skipf(format string, args ...interface{})
	Failed() bool
	Cleanup(func())
}) *gorm.DB {
	// Configure the PostgreSQL connection.
	// Defaults match the running Docker Compose PostgreSQL container (turash-postgres).
	//
	// IMPORTANT: pgtestdb creates ISOLATED test databases - it does NOT touch production data!
	// - It connects to the 'postgres' database (admin database) to CREATE new test databases
	// - Each test gets a unique temporary database (e.g., pgtestdb_abc123)
	// - Test databases are automatically DROPPED after each test completes
	// - The production database 'turash' is NEVER modified or accessed
	conf := pgtestdb.Config{
		DriverName: "pgx",
		User:       getEnv("POSTGRES_USER", "turash"),
		Password:   getEnv("POSTGRES_PASSWORD", "turash123"),
		Host:       getEnv("POSTGRES_HOST", "localhost"),
		Port:       getEnv("POSTGRES_PORT", "5432"),
		Database:   getEnv("POSTGRES_DB", "postgres"), // Connect to the 'postgres' database to create test databases
		// When tests fail to clean up connections, force pgtestdb to terminate
		// any remaining open connections so it can drop the test databases and
		// reclaim disk space. This helps avoid "No space left on device" test
		// failures in constrained CI/dev environments.
		ForceTerminateConnections: true,
		Options:                   "sslmode=disable",
	}

	// Create the migrator instance
	migrator := &GormMigrator{}

	// Check for available disk space before creating databases.
	// On low-disk systems (CI or local dev), creating many template DBs
	// can fail with "No space left on device". We fail early with a
	// helpful message so the developer can free space / clean old DBs.
	if free, err := getFreeDiskBytes("/"); err == nil {
		const minBytes = 200 * 1024 * 1024 // 200 MB
		if free < minBytes {
			t.Fatalf("not enough free disk space for test DB creation: %d bytes free (< %d). Please free space or configure an external test DB.", free, minBytes)
		}
	}

	// Create an isolated test database with migrations applied.
	// pgtestdb.New accepts any type that implements the required TB interface methods.
	// It will:
	//  1. Check whether a template database exists (keyed by Hash())
	//  2. Create the template if needed (runs the Migrate function)
	//  3. Clone the template for this test (fast, milliseconds)
	//  4. Return a connection to the cloned database
	sqlDB := pgtestdb.New(t, conf, migrator)

	// Check whether PostGIS is already enabled or a geometry column exists.
	// If the geometry column exists, PostGIS was set up in the template.
	var postgisEnabled bool
	var columnExists bool

	// Check if the PostGIS extension exists
	if err := sqlDB.QueryRow("SELECT EXISTS(SELECT 1 FROM pg_extension WHERE extname = 'postgis')").Scan(&postgisEnabled); err != nil {
		postgisEnabled = false
	}

	// Check if the geometry column exists (indicates PostGIS was set up)
	if err := sqlDB.QueryRow("SELECT EXISTS(SELECT 1 FROM information_schema.columns WHERE table_name = 'sites' AND column_name = 'location_geometry')").Scan(&columnExists); err != nil {
		columnExists = false
	}

	// If PostGIS is not enabled but the column exists, try to enable PostGIS.
	// This handles cases where the template had PostGIS but the cloned database doesn't inherit it.
	if !postgisEnabled && columnExists {
		// Try to enable PostGIS - if it fails, that's OK since the column already exists
		if err := enablePostGISExtension(sqlDB); err == nil {
			postgisEnabled = true
		}
	} else if !postgisEnabled && !columnExists {
		// PostGIS not enabled and the column doesn't exist - try to enable PostGIS
		if err := enablePostGISExtension(sqlDB); err != nil {
			// Can't enable PostGIS - tests that need it will fail
			t.Logf("Warning: Failed to enable PostGIS extension: %v", err)
		} else {
			postgisEnabled = true
		}
	}

	// Convert to a GORM DB for use in tests
	gormDB, err := gorm.Open(postgres.New(postgres.Config{
		Conn: sqlDB,
	}), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Fatalf("Failed to create GORM DB: %v", err)
	}

	// Run PostGIS migrations if PostGIS is enabled.
	// This ensures geometry columns exist even if they weren't in the template.
	if postgisEnabled {
		if err := domain.RunPostGISMigrations(gormDB); err != nil {
			// PostGIS migrations are critical for spatial operations,
			// so fail the test setup rather than continuing with a broken database.
			t.Fatalf("Failed to run PostGIS migrations: %v", err)
		}
	}

	return gormDB
}
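// Example (illustrative, not from the original code): pointing the pgtestdb-based
// setup above at a non-default PostgreSQL instance from the shell. The variable
// names mirror the getEnv defaults used in setupTestDBWithTB; the host and port
// values are placeholders.
//
//	POSTGRES_HOST=127.0.0.1 POSTGRES_PORT=5433 \
//	POSTGRES_USER=turash POSTGRES_PASSWORD=turash123 \
//	POSTGRES_DB=postgres go test ./...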
// testTB is the minimal interface that the testcontainers-based setup needs.
type testTB interface {
	Helper()
	Cleanup(func())
	Logf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Failed() bool
	Name() string
}

// SetupTestDBWithTestcontainers creates an isolated PostgreSQL database for testing using testcontainers.
// It spins up a PostgreSQL container for each test, providing complete isolation.
// Recommended for CI environments where Docker is available.
//
// Example usage:
//
//	func TestWithTestcontainers(t *testing.T) {
//		db := testutils.SetupTestDBWithTestcontainers(t)
//		// Your test code here - the test has its own PostgreSQL container
//	}
func SetupTestDBWithTestcontainers(t testTB) *gorm.DB {
	t.Helper()

	ctx := context.Background()

	// Start a PostgreSQL container with PostGIS support
	pgContainer, err := tcpostgres.Run(ctx,
		"postgis/postgis:15-3.4-alpine", // Use a PostGIS-enabled image
		tcpostgres.WithDatabase("testdb"),
		tcpostgres.WithUsername("testuser"),
		tcpostgres.WithPassword("testpass"),
		testcontainers.WithWaitStrategy(
			wait.ForLog("database system is ready to accept connections").
				WithOccurrence(2).
				WithStartupTimeout(30*time.Second)), // Longer timeout for PostGIS
	)
	if err != nil {
		t.Fatalf("Failed to start PostgreSQL container: %v", err)
	}

	t.Cleanup(func() {
		if err := pgContainer.Terminate(ctx); err != nil {
			t.Logf("Failed to terminate PostgreSQL container: %v", err)
		}
	})

	// Get the connection string directly from the container
	dsn, err := pgContainer.ConnectionString(ctx, "sslmode=disable")
	if err != nil {
		t.Fatalf("Failed to get connection string: %v", err)
	}

	// Connect to the database
	sqlDB, err := sql.Open("pgx", dsn)
	if err != nil {
		t.Fatalf("Failed to connect to database: %v", err)
	}

	// Verify PostGIS is available
	if err := enablePostGISExtension(sqlDB); err != nil {
		t.Logf("Warning: Failed to enable PostGIS: %v", err)
	}

	// Create the GORM connection
	gormDB, err := gorm.Open(postgres.New(postgres.Config{
		Conn: sqlDB,
	}), &gorm.Config{
		Logger: logger.Default.LogMode(logger.Silent),
	})
	if err != nil {
		t.Fatalf("Failed to create GORM connection: %v", err)
	}

	// Run migrations
	if err := domain.AutoMigrate(gormDB); err != nil {
		t.Fatalf("Failed to run migrations: %v", err)
	}

	// Try to run the PostGIS-specific migrations
	if err := domain.RunPostGISMigrations(gormDB); err != nil {
		t.Logf("Warning: PostGIS migrations failed (expected in some test environments): %v", err)
	}

	return gormDB
}
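// Starting one container per test is the simplest model, but it can be slow for
// large packages. A common alternative is to start a single PostGIS container in
// TestMain and hand its DSN to the tests. The sketch below is comments only
// (TestMain belongs in a _test.go file); the variable pkgDSN and the overall
// wiring are assumptions built from the same testcontainers calls used above.
//
//	var pkgDSN string
//
//	func TestMain(m *testing.M) {
//		ctx := context.Background()
//		c, err := tcpostgres.Run(ctx, "postgis/postgis:15-3.4-alpine",
//			tcpostgres.WithDatabase("testdb"),
//			tcpostgres.WithUsername("testuser"),
//			tcpostgres.WithPassword("testpass"))
//		if err != nil {
//			panic(err)
//		}
//		pkgDSN, _ = c.ConnectionString(ctx, "sslmode=disable")
//		code := m.Run()
//		_ = c.Terminate(ctx)
//		os.Exit(code)
//	}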
// cleanupGeometryColumns removes geometry-related columns, constraints, and indexes
// left over from previous runs, so migrations can start from a clean slate when
// PostGIS is not available.
func cleanupGeometryColumns(db *gorm.DB) error {
	// Only run on PostgreSQL
	if db.Dialector.Name() != "postgres" {
		return nil
	}

	// Drop constraints first (they reference the columns)
	constraints := []string{
		"ALTER TABLE sites DROP CONSTRAINT IF EXISTS check_location_geometry",
		"ALTER TABLE sites DROP CONSTRAINT IF EXISTS chk_sites_footprint_geometry",
		"ALTER TABLE geographical_features DROP CONSTRAINT IF EXISTS chk_geographical_features_geometry",
	}

	for _, constraint := range constraints {
		if err := db.Exec(constraint).Error; err != nil {
			// Log but continue - the constraint might not exist
			fmt.Printf("Warning: failed to drop constraint: %v\n", err)
		}
	}

	// Drop geometry columns
	columns := []string{
		"ALTER TABLE sites DROP COLUMN IF EXISTS location_geometry",
		"ALTER TABLE sites DROP COLUMN IF EXISTS footprint_geometry",
		"ALTER TABLE sites DROP COLUMN IF EXISTS geometry_wkt",
		"ALTER TABLE geographical_features DROP COLUMN IF EXISTS geometry",
	}

	for _, column := range columns {
		if err := db.Exec(column).Error; err != nil {
			// Log but continue - the column might not exist
			fmt.Printf("Warning: failed to drop geometry column: %v\n", err)
		}
	}

	// Drop spatial indexes
	indexes := []string{
		"DROP INDEX IF EXISTS idx_site_geometry",
		"DROP INDEX IF EXISTS idx_sites_footprint_geometry",
		"DROP INDEX IF EXISTS idx_geographical_features_geometry",
	}

	for _, index := range indexes {
		if err := db.Exec(index).Error; err != nil {
			// Log but continue - the index might not exist
			fmt.Printf("Warning: failed to drop geometry index: %v\n", err)
		}
	}

	return nil
}

// SetupTestDBWithTestcontainersForGinkgo creates an isolated PostgreSQL database for Ginkgo tests using testcontainers.
// Use this function in Ginkgo BeforeEach blocks when Docker is available.
//
// Example usage:
//
//	BeforeEach(func() {
//		db = testutils.SetupTestDBWithTestcontainersForGinkgo(GinkgoT())
//		repo = repository.NewMyRepository(db)
//	})
func SetupTestDBWithTestcontainersForGinkgo(ginkgoT interface {
	Helper()
	Logf(format string, args ...interface{})
	Fatalf(format string, args ...interface{})
	Failed() bool
}) *gorm.DB {
	// Create a minimal testTB implementation
	tb := &minimalTB{ginkgoT: ginkgoT}
	return SetupTestDBWithTestcontainers(tb)
}
// minimalTB provides a minimal implementation of testTB for testcontainers.
type minimalTB struct {
	ginkgoT interface {
		Helper()
		Logf(format string, args ...interface{})
		Fatalf(format string, args ...interface{})
		Failed() bool
	}
	cleanups []func()
}

func (m *minimalTB) Helper() { m.ginkgoT.Helper() }

func (m *minimalTB) Cleanup(fn func()) { m.cleanups = append(m.cleanups, fn) }

func (m *minimalTB) Logf(format string, args ...interface{}) { m.ginkgoT.Logf(format, args...) }

func (m *minimalTB) Fatalf(format string, args ...interface{}) { m.ginkgoT.Fatalf(format, args...) }

func (m *minimalTB) Failed() bool { return m.ginkgoT.Failed() }

func (m *minimalTB) Name() string { return "GinkgoTest" }

// getFreeDiskBytes returns the number of free bytes available on the filesystem
// containing the given path, or an error if it cannot be determined.
func getFreeDiskBytes(path string) (uint64, error) {
	var stat syscall.Statfs_t
	if err := syscall.Statfs(path, &stat); err != nil {
		return 0, err
	}
	// Available blocks * block size
	return stat.Bavail * uint64(stat.Bsize), nil
}
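// Portability note (not from the original code): syscall.Statfs and
// syscall.Statfs_t are only defined on Unix-like platforms, so this file will
// not build on Windows. If Windows support is ever needed, one option is to
// move getFreeDiskBytes into build-tag-guarded files, sketched here with
// hypothetical file names:
//
//	// diskspace_unix.go
//	//go:build unix
//	// ... the syscall.Statfs implementation above ...
//
//	// diskspace_windows.go
//	//go:build windows
//	func getFreeDiskBytes(path string) (uint64, error) {
//		return 0, fmt.Errorf("free disk space check not supported on windows")
//	}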