Configure CI/CD pipeline with Harbor registry

Damir Mukimov 2025-12-24 19:17:14 +01:00
parent 00aca957b3
commit 78d87ae345
25 changed files with 2128 additions and 0 deletions

118
.woodpecker.yml Normal file
@@ -0,0 +1,118 @@
when:
  - event: [push, pull_request]
    path: ["bugulma/**", "k8s/**", ".woodpecker.yml"]
variables:
- &build_args "BUILDKIT_PROGRESS=plain,DOCKER_BUILDKIT=1"
- &harbor_registry "registry.bk.glpx.pro"
steps:
# Lint and test frontend
frontend-lint:
image: node:18-alpine
commands:
- cd bugulma/frontend
- yarn install --frozen-lockfile
- yarn lint
- yarn test --run
when:
path: "bugulma/frontend/**"
# Build and push frontend
frontend-build:
image: woodpeckerci/plugin-docker-buildx:latest
settings:
registry: *harbor_registry
repo: *harbor_registry/turash/turash-frontend
dockerfile: bugulma/frontend/Dockerfile
context: bugulma/frontend
platforms: linux/amd64,linux/arm64
build_args: *build_args
tags:
- latest
- ${CI_COMMIT_SHA}
secrets: [docker_username, docker_password]
when:
event: push
branch: master
path: "bugulma/frontend/**"
# Lint and test backend
backend-lint:
image: golang:1.21-alpine
commands:
- cd bugulma/backend
- go mod download
- go vet ./...
- go test -v -race -coverprofile=coverage.out ./...
- go tool cover -html=coverage.out -o coverage.html
when:
path: "bugulma/backend/**"
# Build and push backend
backend-build:
image: woodpeckerci/plugin-docker-buildx:latest
settings:
registry: *harbor_registry
repo: *harbor_registry/turash/turash-backend
dockerfile: bugulma/backend/Dockerfile
context: bugulma/backend
platforms: linux/amd64,linux/arm64
build_args: *build_args
tags:
- latest
- ${CI_COMMIT_SHA}
secrets: [docker_username, docker_password]
when:
event: push
branch: master
path: "bugulma/backend/**"
# Deploy to staging
deploy-staging:
image: woodpeckerci/plugin-kubectl:latest
commands:
- kubectl config set-cluster k3s --server=https://10.10.10.2:6443 --insecure-skip-tls-verify=true
- kubectl config set-credentials default --token=${KUBE_TOKEN}
- kubectl config set-context default --cluster=k3s --user=default
- kubectl config use-context default
      # Deploy backend
      - kubectl apply -f k8s/namespace.yaml
      - kubectl apply -f k8s/configmap.yaml
      # k8s/secret.yaml is gitignored; create the secret out-of-band (e.g. sealed-secrets)
      - kubectl apply -f k8s/deployment.yaml
      - kubectl apply -f k8s/service.yaml
      - kubectl set image deployment/turash-backend backend=registry.bk.glpx.pro/turash/turash-backend:${CI_COMMIT_SHA} -n turash
      - kubectl rollout status deployment/turash-backend -n turash
      # Deploy frontend
      - kubectl apply -f k8s/frontend-deployment.yaml
      - kubectl apply -f k8s/frontend-service.yaml
      - kubectl set image deployment/turash-frontend frontend=registry.bk.glpx.pro/turash/turash-frontend:${CI_COMMIT_SHA} -n turash
      - kubectl rollout status deployment/turash-frontend -n turash
secrets: [kube_token]
when:
event: push
branch: master
path: ["bugulma/**", "k8s/**"]
# Run E2E tests
e2e-test:
image: mcr.microsoft.com/playwright:v1.40.0-jammy
commands:
- cd bugulma/frontend
- yarn install --frozen-lockfile
      - yarn test:e2e # Playwright runs headless by default
when:
event: push
branch: master
path: "bugulma/frontend/**"
# Notify on failure
notify-failure:
image: alpine:latest
commands:
- echo "Pipeline failed for commit ${CI_COMMIT_SHA}"
- echo "Check logs at ${CI_SYSTEM_URL}/${CI_REPO}/${CI_PIPELINE_NUMBER}"
when:
status: failure

12
k8s/.gitignore vendored Normal file
@@ -0,0 +1,12 @@
# Secrets - never commit these!
secret.yaml
sealed-secret.yaml
*.secret.yaml
*.sealed.yaml
# Temporary files
*.tmp
*.bak
*.swp
harbor-secrets.yaml

334
k8s/README.md Normal file
@@ -0,0 +1,334 @@
# Turash Backend - Kubernetes Deployment
This directory contains Kubernetes manifests and ArgoCD configuration for deploying the Turash backend application.
## Architecture
The deployment includes:
- **Backend API**: Go application serving on port 8080
- **PostgreSQL + PostGIS**: Spatial database (managed separately or via StatefulSet)
- **Neo4j**: Graph database (managed separately or via StatefulSet)
- **Redis**: Caching layer (managed separately or via StatefulSet)
- **NATS**: Message queue (managed separately or via StatefulSet)
## Prerequisites
1. **Kubernetes Cluster** (k3s/k8s) with:
- ArgoCD installed and configured
- Ingress controller (nginx-ingress recommended)
- Metrics server (for HPA)
- Storage class for persistent volumes
2. **Container Registry**:
   - Images are built and pushed to `registry.bk.glpx.pro/turash/turash-backend` by the Woodpecker pipeline
   - Or update the image reference in `deployment.yaml` to point at your own registry
3. **Secrets Management**:
- Create secrets using the template provided
- Use sealed-secrets, external-secrets, or your preferred solution
## Quick Start
### 1. Build and Push Docker Image
```bash
cd bugulma/backend
docker build -t registry.bk.glpx.pro/turash/turash-backend:latest -f Dockerfile .
docker push registry.bk.glpx.pro/turash/turash-backend:latest
```
### 2. Create Secrets
```bash
# Copy the template
cp k8s/secret.yaml.template k8s/secret.yaml
# Edit secret.yaml with your actual values
# NOTE: secret.yaml is already listed in k8s/.gitignore -- keep it out of git!

# Apply the secret manifest (the template is a complete Kubernetes Secret)
kubectl apply -f k8s/secret.yaml
```
Or using sealed-secrets:
```bash
# Install the sealed-secrets controller if not already installed
kubectl apply -f https://github.com/bitnami-labs/sealed-secrets/releases/download/v0.24.0/controller.yaml

# Seal the secret manifest (kubeseal reads a Secret manifest on stdin)
kubeseal -o yaml < k8s/secret.yaml > k8s/sealed-secret.yaml
```
### 3. Deploy Infrastructure Services
The backend depends on PostgreSQL, Neo4j, Redis, and NATS. You can either:
**Option A: Use existing managed services**
- Update service names in `configmap.yaml` to point to your managed services
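For example, a minimal sketch of the relevant `configmap.yaml` overrides (the hostnames below are placeholders, not real services):

```yaml
data:
  POSTGRES_HOST: "my-managed-postgres.example.com" # placeholder
  NEO4J_URI: "neo4j://my-managed-neo4j.example.com:7687" # placeholder
  REDIS_URL: "redis://my-managed-redis.example.com:6379" # placeholder
```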
**Option B: Deploy via Helm charts**
```bash
# PostgreSQL (note: the stock Bitnami image does not bundle PostGIS;
# use a PostGIS-enabled image or a managed instance for spatial queries)
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install turash-postgres bitnami/postgresql \
  --namespace turash \
  --set auth.database=turash \
  --set auth.username=turash \
  --set auth.password=YOUR_PASSWORD
# Neo4j
helm repo add neo4j https://helm.neo4j.com/neo4j
helm install turash-neo4j neo4j/neo4j \
--namespace turash \
--set neo4j.password=YOUR_PASSWORD
# Redis
helm install turash-redis bitnami/redis \
--namespace turash \
--set auth.password=YOUR_PASSWORD
# NATS
helm repo add nats https://nats-io.github.io/k8s/helm/charts/
helm install turash-nats nats/nats \
--namespace turash
```
### 4. Deploy Application Manually
```bash
# Create namespace
kubectl apply -f k8s/namespace.yaml
# Apply ConfigMap
kubectl apply -f k8s/configmap.yaml
# Apply Secret (or sealed-secret)
kubectl apply -f k8s/secret.yaml
# Apply Deployment
kubectl apply -f k8s/deployment.yaml
# Apply Service
kubectl apply -f k8s/service.yaml
# Apply Ingress (optional)
kubectl apply -f k8s/ingress.yaml
# Apply HPA
kubectl apply -f k8s/hpa.yaml
# Apply PDB
kubectl apply -f k8s/pdb.yaml
```
### 5. Deploy via ArgoCD
#### Add Repository to ArgoCD
```bash
argocd repo add https://github.com/SamyRai/turash.git \
--name turash \
--type git
```
#### Create Project (Optional)
```bash
kubectl apply -f k8s/argocd/project.yaml
```
#### Create Application
```bash
kubectl apply -f k8s/argocd/application.yaml
```
Or via ArgoCD CLI:
```bash
argocd app create turash-backend \
--repo https://github.com/SamyRai/turash.git \
--path k8s \
--dest-server https://kubernetes.default.svc \
--dest-namespace turash \
--project default \
--sync-policy automated \
--self-heal \
--auto-prune
```
#### Sync Application
```bash
argocd app sync turash-backend
```
## Configuration
### Environment Variables
The application uses the following environment variables (configured via ConfigMap and Secret):
**ConfigMap (non-sensitive):**
- `SERVER_PORT`: Server port (default: 8080)
- `GIN_MODE`: Gin framework mode (release/debug)
- `LOG_LEVEL`: Logging level (info/debug)
- `POSTGRES_HOST`: PostgreSQL hostname
- `POSTGRES_PORT`: PostgreSQL port
- `POSTGRES_DB`: Database name
- `NEO4J_URI`: Neo4j connection URI
- `REDIS_URL`: Redis connection URL
- `NATS_URL`: NATS connection URL
**Secret (sensitive):**
- `JWT_SECRET`: JWT signing secret
- `POSTGRES_USER`: PostgreSQL username
- `POSTGRES_PASSWORD`: PostgreSQL password
- `NEO4J_PASSWORD`: Neo4j password
- `REDIS_PASSWORD`: Redis password
### Resource Limits
Default resource requests/limits:
- Requests: 256Mi memory, 100m CPU
- Limits: 512Mi memory, 500m CPU
Adjust in `deployment.yaml` based on your workload.
### Scaling
The HPA is configured to:
- Scale between 2-10 replicas
- Target 70% CPU utilization
- Target 80% memory utilization
Adjust in `hpa.yaml` as needed.
## Monitoring
### Health Checks
The application exposes a `/health` endpoint used for:
- Liveness probe: Checks every 10s after 30s initial delay
- Readiness probe: Checks every 5s after 10s initial delay
### Metrics
If Prometheus is installed, metrics are exposed at `/metrics` endpoint.
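For scraping with the Prometheus Operator, a minimal ServiceMonitor sketch (an assumption, not part of these manifests; the `release: prometheus` label must match your Prometheus instance's selector):

```yaml
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
  name: turash-backend
  namespace: turash
  labels:
    release: prometheus # assumed selector label
spec:
  selector:
    matchLabels:
      app: turash-backend
  endpoints:
    - port: http # named service port
      path: /metrics
      interval: 30s
```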
## Troubleshooting
### Check Pod Status
```bash
kubectl get pods -n turash
kubectl describe pod <pod-name> -n turash
kubectl logs <pod-name> -n turash
```
### Check Service
```bash
kubectl get svc -n turash
kubectl describe svc turash-backend -n turash
```
### Check Ingress
```bash
kubectl get ingress -n turash
kubectl describe ingress turash-backend-ingress -n turash
```
### Port Forward for Testing
```bash
kubectl port-forward -n turash svc/turash-backend 8080:80
curl http://localhost:8080/health
```
### Check ArgoCD Application
```bash
argocd app get turash-backend
argocd app logs turash-backend
argocd app history turash-backend
```
## Database Migrations
Run migrations using the CLI tool:
```bash
# Run migrations from inside a running backend pod
kubectl exec -it -n turash deployment/turash-backend -- ./bugulma-cli migrate up
```
## CI/CD Integration
### GitHub Actions Example
```yaml
name: Build and Deploy
on:
  push:
    branches: [master]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      - name: Log in to registry
        # Assumes a GHCR_TOKEN secret with the write:packages scope
        run: echo "${{ secrets.GHCR_TOKEN }}" | docker login ghcr.io -u ${{ github.actor }} --password-stdin
      - name: Build and push Docker image
        run: |
          docker build -t ghcr.io/samyrai/turash-backend:${{ github.sha }} -f bugulma/backend/Dockerfile bugulma/backend
          docker push ghcr.io/samyrai/turash-backend:${{ github.sha }}
      - name: Update deployment image
        run: |
          sed -i "s|image:.*|image: ghcr.io/samyrai/turash-backend:${{ github.sha }}|" k8s/deployment.yaml
          git config user.name "ci-bot"          # placeholder identity
          git config user.email "ci@example.com" # placeholder identity
          git commit -am "Update image to ${{ github.sha }}"
          git push
```
ArgoCD will automatically sync the updated deployment.
## Security Considerations
1. **Secrets**: Never commit secrets to git. Use sealed-secrets or external-secrets.
2. **Image Security**: Scan images for vulnerabilities before deployment.
3. **Network Policies**: Consider adding NetworkPolicies to restrict pod-to-pod communication (a sketch follows this list).
4. **RBAC**: Configure appropriate RBAC rules for service accounts.
5. **TLS**: Enable TLS for ingress and use cert-manager for automatic certificate management.
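To illustrate point 3, a minimal NetworkPolicy sketch (an assumption, not one of this commit's manifests) that only admits ingress to the backend pods from the namespace where Traefik runs:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: turash-backend-allow-ingress
  namespace: turash
spec:
  podSelector:
    matchLabels:
      app: turash-backend
  policyTypes:
    - Ingress
  ingress:
    - from:
        - namespaceSelector:
            matchLabels:
              kubernetes.io/metadata.name: kube-system # Traefik's namespace
      ports:
        - protocol: TCP
          port: 8080
```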
## Production Checklist
- [ ] Update all default passwords in secrets
- [ ] Configure TLS certificates for ingress
- [ ] Set up monitoring and alerting
- [ ] Configure backup strategy for databases
- [ ] Review and adjust resource limits
- [ ] Set up log aggregation
- [ ] Configure network policies
- [ ] Enable pod security policies
- [ ] Set up CI/CD pipeline
- [ ] Document runbooks for common issues
## Support
For issues or questions:
- Check application logs: `kubectl logs -n turash deployment/turash-backend`
- Check ArgoCD sync status: `argocd app get turash-backend`
- Review Kubernetes events: `kubectl get events -n turash --sort-by='.lastTimestamp'`

50
k8s/argocd/application.yaml Normal file
@@ -0,0 +1,50 @@
apiVersion: argoproj.io/v1alpha1
kind: Application
metadata:
name: turash-backend
namespace: argocd
labels:
app: turash-backend
environment: production
finalizers:
- resources-finalizer.argocd.argoproj.io
spec:
project: default
source:
repoURL: https://github.com/SamyRai/turash.git
targetRevision: master
path: k8s
directory:
recurse: true
include: "*.yaml"
exclude: "*.template"
destination:
server: https://kubernetes.default.svc
namespace: turash
syncPolicy:
automated:
prune: true
selfHeal: true
allowEmpty: false
syncOptions:
- CreateNamespace=true
- PrunePropagationPolicy=foreground
- PruneLast=true
retry:
limit: 5
backoff:
duration: 5s
factor: 2
maxDuration: 3m
revisionHistoryLimit: 10
ignoreDifferences:
- group: apps
kind: Deployment
jsonPointers:
- /spec/replicas
- group: autoscaling
kind: HorizontalPodAutoscaler
jsonPointers:
- /spec/minReplicas
- /spec/maxReplicas

40
k8s/argocd/project.yaml Normal file
@@ -0,0 +1,40 @@
apiVersion: argoproj.io/v1alpha1
kind: AppProject
metadata:
name: turash
namespace: argocd
labels:
name: turash
spec:
description: Turash Backend Application Project
sourceRepos:
- 'https://github.com/SamyRai/turash.git'
- '*'
destinations:
- namespace: turash
server: https://kubernetes.default.svc
- namespace: '*'
server: https://kubernetes.default.svc
clusterResourceWhitelist:
- group: '*'
kind: '*'
namespaceResourceWhitelist:
- group: '*'
kind: '*'
roles:
- name: admin
description: Admin role for Turash project
policies:
- p, proj:turash:admin, applications, *, turash/*, allow
- p, proj:turash:admin, repositories, *, *, allow
- p, proj:turash:admin, clusters, *, *, allow
groups:
- turash-admins
- name: developer
description: Developer role for Turash project
policies:
- p, proj:turash:developer, applications, get, turash/*, allow
- p, proj:turash:developer, applications, sync, turash/*, allow
groups:
- turash-developers

44
k8s/configmap.yaml Normal file
@@ -0,0 +1,44 @@
apiVersion: v1
kind: ConfigMap
metadata:
name: turash-backend-config
namespace: turash
labels:
app: turash-backend
component: config
data:
# Server Configuration
SERVER_PORT: "8080"
GIN_MODE: "release"
LOG_LEVEL: "info"
ENVIRONMENT: "production"
# CORS Configuration
CORS_ORIGIN: "*"
# PostgreSQL Configuration (hosts will be overridden by service names)
POSTGRES_HOST: "turash-postgres"
POSTGRES_PORT: "5432"
POSTGRES_DB: "turash"
POSTGRES_SSLMODE: "require"
# Neo4j Configuration
NEO4J_URI: "neo4j://turash-neo4j:7687"
NEO4J_USERNAME: "neo4j"
NEO4J_DATABASE: "neo4j"
NEO4J_ENABLED: "true"
# Redis Configuration
REDIS_URL: "redis://turash-redis:6379"
# NATS Configuration
NATS_URL: "nats://turash-nats:4222"
# Ollama Configuration (optional)
OLLAMA_URL: "http://turash-ollama:11434"
OLLAMA_MODEL: "qwen2.5:7b"
# Google Maps API (optional - set if needed)
# GOOGLE_KG_API_KEY: ""
# GOOGLE_CLOUD_PROJECT_ID: ""

88
k8s/deployment.yaml Normal file
@@ -0,0 +1,88 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: turash-backend
namespace: turash
labels:
app: turash-backend
component: backend
version: v1
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
selector:
matchLabels:
app: turash-backend
template:
metadata:
labels:
app: turash-backend
component: backend
version: v1
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "8080"
prometheus.io/path: "/metrics"
spec:
containers:
- name: backend
image: registry.bk.glpx.pro/turash/turash-backend:latest
imagePullPolicy: Always
ports:
- name: http
containerPort: 8080
protocol: TCP
envFrom:
- configMapRef:
name: turash-backend-config
- secretRef:
name: turash-backend-secret
          env:
            # Construct the PostgreSQL DSN from ConfigMap/Secret values by default
            - name: POSTGRES_DSN
              value: "host=$(POSTGRES_HOST) port=$(POSTGRES_PORT) user=$(POSTGRES_USER) password=$(POSTGRES_PASSWORD) dbname=$(POSTGRES_DB) sslmode=$(POSTGRES_SSLMODE)"
            # Override with an explicit DSN from the secret when the key exists
            # (the later duplicate entry wins; an absent optional key is skipped)
            - name: POSTGRES_DSN
              valueFrom:
                secretKeyRef:
                  name: turash-backend-secret
                  key: POSTGRES_DSN
                  optional: true
resources:
requests:
memory: "256Mi"
cpu: "100m"
limits:
memory: "512Mi"
cpu: "500m"
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
securityContext:
runAsNonRoot: true
runAsUser: 1000
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
restartPolicy: Always
terminationGracePeriodSeconds: 30

76
k8s/frontend-deployment.yaml Normal file
@@ -0,0 +1,76 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: turash-frontend
namespace: turash
labels:
app: turash-frontend
component: frontend
version: v1
spec:
replicas: 2
strategy:
type: RollingUpdate
rollingUpdate:
maxSurge: 1
maxUnavailable: 0
selector:
matchLabels:
app: turash-frontend
template:
metadata:
labels:
app: turash-frontend
component: frontend
version: v1
annotations:
prometheus.io/scrape: "true"
prometheus.io/port: "80"
prometheus.io/path: "/health"
spec:
containers:
- name: frontend
image: registry.bk.glpx.pro/turash/turash-frontend:latest
imagePullPolicy: Always
ports:
- name: http
containerPort: 80
protocol: TCP
          env:
            # NOTE: Vite reads VITE_* variables at build time; these runtime
            # values only take effect if the image injects them on startup
            - name: VITE_API_BASE_URL
              value: "https://turash-api.bk.glpx.pro"
            - name: VITE_ENVIRONMENT
              value: "production"
resources:
requests:
memory: "128Mi"
cpu: "50m"
limits:
memory: "256Mi"
cpu: "200m"
livenessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 30
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
readinessProbe:
httpGet:
path: /health
port: http
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 3
failureThreshold: 3
securityContext:
runAsNonRoot: true
runAsUser: 101 # nginx user
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
capabilities:
drop:
- ALL
restartPolicy: Always
terminationGracePeriodSeconds: 30

45
k8s/frontend-hpa.yaml Normal file
@@ -0,0 +1,45 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: turash-frontend-hpa
namespace: turash
labels:
app: turash-frontend
component: autoscaling
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: turash-frontend
minReplicas: 1
maxReplicas: 5
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 60
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 70
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 50
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 60
- type: Pods
value: 2
periodSeconds: 60
selectPolicy: Max

34
k8s/frontend-ingress.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: turash-frontend-ingress
namespace: turash
labels:
app: turash-frontend
component: ingress
annotations:
# Traefik annotations
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# SSL/TLS configuration
cert-manager.io/cluster-issuer: letsencrypt-prod
# Redirect to HTTPS
traefik.ingress.kubernetes.io/redirect-entrypoint: websecure
# Enable compression
traefik.ingress.kubernetes.io/router.middlewares: default-compress@kubernetescrd
spec:
ingressClassName: traefik
rules:
- host: turash.bk.glpx.pro
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: turash-frontend
port:
number: 80
tls:
- hosts:
- turash.bk.glpx.pro
secretName: turash-frontend-tls

18
k8s/frontend-service.yaml Normal file
@@ -0,0 +1,18 @@
apiVersion: v1
kind: Service
metadata:
name: turash-frontend
namespace: turash
labels:
app: turash-frontend
component: frontend
spec:
type: ClusterIP
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
selector:
app: turash-frontend
sessionAffinity: None

46
k8s/hpa.yaml Normal file
@@ -0,0 +1,46 @@
apiVersion: autoscaling/v2
kind: HorizontalPodAutoscaler
metadata:
name: turash-backend-hpa
namespace: turash
labels:
app: turash-backend
component: autoscaling
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: turash-backend
minReplicas: 2
maxReplicas: 10
metrics:
- type: Resource
resource:
name: cpu
target:
type: Utilization
averageUtilization: 70
- type: Resource
resource:
name: memory
target:
type: Utilization
averageUtilization: 80
behavior:
scaleDown:
stabilizationWindowSeconds: 300
policies:
- type: Percent
value: 50
periodSeconds: 60
scaleUp:
stabilizationWindowSeconds: 0
policies:
- type: Percent
value: 100
periodSeconds: 60
- type: Pods
value: 2
periodSeconds: 60
selectPolicy: Max

149
k8s/ingress-domains.md Normal file
@@ -0,0 +1,149 @@
# Ingress Domain Configuration
## Current Cluster Setup
### Ingress Controller
- **Type**: Traefik
- **Ingress Class**: `traefik` (default)
- **Service**: `traefik` in `kube-system` namespace
- **Port**: 80 (NodePort: 32080)
### Domain Pattern
All services use the pattern: `*.bk.glpx.pro`
### Existing Domains
| Service | Domain | Namespace | TLS | Notes |
|---------|--------|-----------|-----|-------|
| ArgoCD | `argocd.bk.glpx.pro` | argocd | ✅ (letsencrypt-prod) | Cert-manager managed |
| Rancher | `rancher.bk.glpx.pro` | cattle-system | ✅ | Rancher managed |
| Code Server | `code.bk.glpx.pro` | code-server | ✅ (letsencrypt-prod) | Cert-manager managed |
| Redis Commander | `redis.bk.glpx.pro` | infra | ✅ (letsencrypt-prod) | Cert-manager managed |
| Storage | `storage.bk.glpx.pro` | just-storage | ❌ | HTTP only |
| OAuth2 Proxy | `login.bk.glpx.pro` | kube-system | ❌ | HTTP only |
| OCR Service | `ocr.bk.glpx.pro` | kube-system | ❌ | HTTP only |
| Woodpecker | `woodpecker.bk.glpx.pro` | woodpecker | ✅ (letsencrypt-prod) | Cert-manager managed |
| **Turash API** | `turash-api.bk.glpx.pro` | turash | ✅ (letsencrypt-prod) | Planned |
### Turash Backend Domain
**Current**: `turash-api.bk.glpx.pro`
This follows the existing pattern while being specific about the service. Alternative options considered:
- `api.turash.bk.glpx.pro`
- `backend.turash.bk.glpx.pro`
## TLS Configuration
### Cert-Manager
- **Cluster Issuer**: `letsencrypt-prod`
- **Automatic TLS**: Enabled via annotation `cert-manager.io/cluster-issuer: letsencrypt-prod`
- **Certificate Secret**: Automatically created by cert-manager
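For reference, a minimal sketch of what such a ClusterIssuer looks like (the cluster's actual `letsencrypt-prod` issuer already exists; the email is a placeholder):

```yaml
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: admin@example.com # placeholder contact
    privateKeySecretRef:
      name: letsencrypt-prod-account-key
    solvers:
      - http01:
          ingress:
            class: traefik
```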
### Ingress Annotations for Traefik
```yaml
annotations:
# Use secure entrypoint (HTTPS)
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# Enable TLS with cert-manager
cert-manager.io/cluster-issuer: letsencrypt-prod
# Optional: Add middleware for CORS, rate limiting, etc.
traefik.ingress.kubernetes.io/router.middlewares: default-cors@kubernetescrd
```
## Traefik vs Nginx
**Important**: The cluster uses **Traefik**, not nginx-ingress!
### Differences:
1. **Ingress Class**: Use `traefik` instead of `nginx`
2. **Annotations**: Use `traefik.ingress.kubernetes.io/*` instead of `nginx.ingress.kubernetes.io/*`
3. **Entrypoints**: Traefik uses `web` (HTTP) and `websecure` (HTTPS)
4. **Middleware**: Traefik uses Middleware CRDs for advanced features
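The middlewares referenced by the manifests (e.g. `default-compress@kubernetescrd`, i.e. the `compress` Middleware in the `default` namespace) are assumed to already exist; a minimal sketch of the corresponding CRD:

```yaml
apiVersion: traefik.io/v1alpha1 # older clusters use traefik.containo.us/v1alpha1
kind: Middleware
metadata:
  name: compress
  namespace: default
spec:
  compress: {}
```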
### Common Traefik Annotations
```yaml
# Entrypoints
traefik.ingress.kubernetes.io/router.entrypoints: websecure
# Middleware
traefik.ingress.kubernetes.io/router.middlewares: namespace-middleware@kubernetescrd
# TLS
traefik.ingress.kubernetes.io/router.tls: "true"
# Redirect to HTTPS
traefik.ingress.kubernetes.io/redirect-entrypoint: websecure
```
## DNS Configuration
For local development or if DNS is not configured:
1. **Add to `/etc/hosts`** (Linux/macOS):
```
10.10.10.2 turash-api.bk.glpx.pro
```
2. **Or use NodePort directly**:
```
http://10.10.10.2:32080
```
## Testing Ingress
```bash
# Check ingress status
kubectl get ingress -n turash
# Test with curl
curl -H "Host: turash-api.bk.glpx.pro" http://10.10.10.2:32080/health
# Test with proper domain (if DNS configured)
curl https://turash-api.bk.glpx.pro/health
```
## Troubleshooting
### Ingress not working?
1. Check ingress status:
```bash
kubectl describe ingress turash-backend-ingress -n turash
```
2. Check Traefik logs:
```bash
kubectl logs -n kube-system -l app.kubernetes.io/name=traefik
```
3. Verify service:
```bash
kubectl get svc turash-backend -n turash
```
4. Check certificate status:
```bash
kubectl get certificate -n turash
kubectl describe certificate turash-backend-tls -n turash
```
### Certificate issues?
1. Check cert-manager:
```bash
kubectl get clusterissuer letsencrypt-prod
kubectl get certificaterequest -n turash
```
2. Check certificate secret:
```bash
kubectl get secret turash-backend-tls -n turash
```

34
k8s/ingress.yaml Normal file
@@ -0,0 +1,34 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: turash-backend-ingress
namespace: turash
labels:
app: turash-backend
component: ingress
annotations:
# Traefik annotations
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.middlewares: default-cors@kubernetescrd
# SSL/TLS configuration
cert-manager.io/cluster-issuer: letsencrypt-prod
# CORS configuration (handled by middleware or backend)
traefik.ingress.kubernetes.io/router.tls: "true"
spec:
ingressClassName: traefik
rules:
- host: turash-api.bk.glpx.pro
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: turash-backend
port:
number: 80
tls:
- hosts:
- turash-api.bk.glpx.pro
secretName: turash-backend-tls

9
k8s/namespace.yaml Normal file
@@ -0,0 +1,9 @@
apiVersion: v1
kind: Namespace
metadata:
name: turash
labels:
name: turash
app: turash-backend
environment: production

14
k8s/pdb.yaml Normal file
@@ -0,0 +1,14 @@
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: turash-backend-pdb
namespace: turash
labels:
app: turash-backend
component: availability
spec:
minAvailable: 1
selector:
matchLabels:
app: turash-backend

@@ -0,0 +1,262 @@
# Harbor Registry Integration Complete ✅
## Registry Access
- **URL**: https://registry.bk.glpx.pro
- **Registry Endpoint**: `registry.bk.glpx.pro`
- **Username**: `admin`
- **Password**: `nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0=`
## Local Docker Configuration
### Login to Harbor
```bash
docker login registry.bk.glpx.pro -u admin -p "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### Test Push/Pull
```bash
# Pull test image
docker pull alpine:latest
# Tag for Harbor
docker tag alpine:latest registry.bk.glpx.pro/turash/test:latest
# Push to Harbor
docker push registry.bk.glpx.pro/turash/test:latest
```
## Woodpecker CI/CD Configuration
### Registry Configuration
**Registry Added**: `registry.bk.glpx.pro`
- Hostname: `registry.bk.glpx.pro`
- Username: `admin`
- Password: Configured
### Secrets Configured
**Docker Credentials**:
- `docker_username`: `admin`
- `docker_password`: `nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0=`
### Verify Configuration
```bash
# List registries
woodpecker-cli repo registry ls SamyRai/turash
# List secrets
woodpecker-cli repo secret ls SamyRai/turash
```
### Pipeline Usage
The `.woodpecker.yml` is already configured to use Harbor:
```yaml
variables:
- &harbor_registry "registry.bk.glpx.pro"
steps:
frontend-build:
settings:
registry: *harbor_registry
repo: *harbor_registry/turash/turash-frontend
secrets: [docker_username, docker_password]
```
## ArgoCD Configuration
### Image Pull Secrets
**Secret Created**: `harbor-registry-argocd` in `argocd` namespace
**Service Accounts Updated**:
- `argocd-repo-server`
- `argocd-application-controller`
- `argocd-server`
### Application Configuration
ArgoCD applications will automatically use Harbor registry for image pulls.
### Verify Configuration
```bash
# Check secret exists
kubectl get secret harbor-registry-argocd -n argocd
# Check service account
kubectl get serviceaccount argocd-repo-server -n argocd -o yaml | grep imagePullSecrets
```
## Kubernetes Application Configuration
### Turash Namespace
**Secret Created**: `harbor-registry-secret` in `turash` namespace
**Service Account Updated**: `default` service account patched
### Deployment Images
All deployments are configured to pull from Harbor:
- Backend: `registry.bk.glpx.pro/turash/turash-backend:latest`
- Frontend: `registry.bk.glpx.pro/turash/turash-frontend:latest`
## Harbor Project Setup
### Create Project in Harbor UI
1. Login to https://registry.bk.glpx.pro
2. Go to **Projects** → **New Project**
3. Create project: `turash`
4. Set as **Public** (or configure access as needed)
### Project Settings
- **Name**: `turash`
- **Public**: Yes (for CI/CD access)
- **Vulnerability Scanning**: Enabled (Trivy)
- **Content Trust**: Optional
## Complete CI/CD Flow
### 1. Code Push Triggers Woodpecker
```bash
git push origin master
```
### 2. Woodpecker Pipeline
- Lints and tests code
- Builds Docker images
- Pushes to Harbor: `registry.bk.glpx.pro/turash/turash-{frontend|backend}:{tag}`
### 3. ArgoCD Sync
- Monitors Git repository
- Detects new image tags
- Deploys to Kubernetes
- Pulls images from Harbor
### 4. Application Access
- Frontend: https://turash.bk.glpx.pro
- Backend API: https://turash-api.bk.glpx.pro
## Verification Commands
### Check Harbor Status
```bash
# Harbor pods
kubectl get pods -n harbor
# Harbor ingress
kubectl get ingress -n harbor
# Test Harbor API
curl -k https://registry.bk.glpx.pro/api/v2.0/health
```
### Check Woodpecker Integration
```bash
# List registries
woodpecker-cli repo registry ls SamyRai/turash
# Test pipeline
woodpecker-cli pipeline start SamyRai/turash
```
### Check ArgoCD Integration
```bash
# List applications
argocd app list
# Check image pull secrets
kubectl get serviceaccount -n argocd -o yaml | grep imagePullSecrets
```
### Check Kubernetes Integration
```bash
# Verify secrets
kubectl get secret harbor-registry-secret -n turash
# Check service account
kubectl get serviceaccount default -n turash -o yaml | grep imagePullSecrets
# Test image pull
kubectl run test-pull --image=registry.bk.glpx.pro/turash/test:latest --rm -it --restart=Never -n turash
```
## Troubleshooting
### Harbor Login Fails
```bash
# Check Harbor is running
kubectl get pods -n harbor | grep core
# Check ingress
kubectl get ingress -n harbor
# Test connectivity
curl -k https://registry.bk.glpx.pro
```
### Woodpecker Can't Push
```bash
# Verify registry credentials
woodpecker-cli repo registry ls SamyRai/turash
# Check secrets
woodpecker-cli repo secret ls SamyRai/turash
# Test manually
docker login registry.bk.glpx.pro -u admin -p "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### ArgoCD Can't Pull Images
```bash
# Check image pull secret
kubectl get secret harbor-registry-argocd -n argocd
# Verify service account
kubectl describe serviceaccount argocd-repo-server -n argocd
# Check pod events
kubectl describe pod <pod-name> -n <namespace> | grep -i "pull\|image"
```
## Security Notes
⚠️ **IMPORTANT**:
- Passwords are stored in Kubernetes secrets
- Change default passwords in production
- Use RBAC to restrict Harbor access
- Enable audit logging
- Configure retention policies
- Use private projects for sensitive images
## Next Steps
1. ✅ Harbor deployed and accessible
2. ✅ Database configured and migrated
3. ✅ Woodpecker configured
4. ✅ ArgoCD configured
5. ✅ Kubernetes configured
6. ⏭️ Create Harbor project `turash`
7. ⏭️ Push first images via Woodpecker
8. ⏭️ Deploy via ArgoCD
**Everything is ready for CI/CD!** 🚀

134
k8s/registry/README.md Normal file
@@ -0,0 +1,134 @@
# Harbor Container Registry Setup
## Overview
Harbor is deployed as a production-ready container registry using:
- **External PostgreSQL**: Uses existing `infra-postgres-rw` service in `data` namespace
- **Internal Redis**: Deployed within Harbor namespace
- **Longhorn Fast Storage**: All persistent volumes use `longhorn-fast` storage class
- **Traefik Ingress**: Accessible at `https://registry.bk.glpx.pro`
## Configuration
### Database Connection
Harbor uses the external PostgreSQL database:
- **Host**: `infra-postgres-rw.data.svc.cluster.local`
- **Port**: `5432`
- **Database**: `harbor`
- **Username**: `app`
- **Password**: Stored in `infra-postgres-credentials` secret in `data` namespace
### Storage
All components use `longhorn-fast` storage class:
- **Registry**: 50Gi
- **Job Service**: 1Gi
- **Redis**: 2Gi
- **Trivy**: 5Gi
### Access
- **Web UI**: https://registry.bk.glpx.pro
- **Default Admin**: `admin` / `Harbor12345!` (CHANGE IN PRODUCTION!)
- **Registry Endpoint**: `registry.bk.glpx.pro`
## Integration with Woodpecker
### Configure Registry in Woodpecker
```bash
# Add Harbor registry to Woodpecker repository
woodpecker-cli repo registry add <repo-id> \
--hostname registry.bk.glpx.pro \
--username admin \
--password Harbor12345!
```
### Use in Woodpecker Pipeline
```yaml
steps:
build:
image: woodpeckerci/plugin-docker-buildx
settings:
registry: registry.bk.glpx.pro
repo: registry.bk.glpx.pro/turash/backend
tags: [latest, ${CI_COMMIT_SHA}]
secrets: [docker_username, docker_password]
```
## Integration with ArgoCD
ArgoCD can pull images from Harbor. Configure image pull secrets:
```bash
# Create registry secret
kubectl create secret docker-registry harbor-registry-secret \
--docker-server=registry.bk.glpx.pro \
--docker-username=admin \
--docker-password=Harbor12345! \
--namespace=turash
# Add to service account
kubectl patch serviceaccount default -n turash \
-p '{"imagePullSecrets":[{"name":"harbor-registry-secret"}]}'
```
## Production Checklist
- [ ] Change `harborAdminPassword` to strong password
- [ ] Change `secretKey` to secure random key
- [ ] Enable SSL/TLS for database connection
- [ ] Configure backup strategy for Harbor data
- [ ] Set up monitoring and alerting
- [ ] Configure retention policies for images
- [ ] Enable vulnerability scanning (Trivy)
- [ ] Set up replication for high availability
## Troubleshooting
### Check Harbor Status
```bash
kubectl get pods -n harbor
kubectl logs -n harbor deployment/harbor-core
```
### Test Database Connection
```bash
kubectl exec -it -n harbor deployment/harbor-core -- \
psql -h infra-postgres-rw.data.svc.cluster.local -U app -d harbor
```
### Check Registry Access
```bash
# Login to registry
docker login registry.bk.glpx.pro -u admin -p Harbor12345!
# Test push/pull
docker pull alpine:latest
docker tag alpine:latest registry.bk.glpx.pro/turash/test:latest
docker push registry.bk.glpx.pro/turash/test:latest
```
## Security Notes
⚠️ **IMPORTANT**: The current configuration uses default passwords. For production:
1. Generate strong passwords:
```bash
openssl rand -base64 32 # For harborAdminPassword
openssl rand -base64 32 # For secretKey
```
2. Store secrets in Kubernetes secrets or external secret management
3. Enable RBAC and configure proper access controls
4. Enable audit logging
5. Configure network policies to restrict access

@@ -0,0 +1,116 @@
# Harbor Registry Setup Complete ✅
## Access Information
### Harbor Web UI
- **URL**: https://registry.bk.glpx.pro
- **Username**: `admin`
- **Password**: `nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0=`
### Registry Endpoint
- **Registry**: `registry.bk.glpx.pro`
- **Docker Login**:
```bash
docker login registry.bk.glpx.pro -u admin -p "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
## Configuration Summary
### Database
- **Type**: External PostgreSQL
- **Host**: `infra-postgres-rw.data.svc.cluster.local`
- **Database**: `harbor`
- **Username**: `app`
### Storage
- **Storage Class**: `longhorn-fast`
- **Registry**: 50Gi
- **Job Service**: 1Gi
- **Redis**: 2Gi
- **Trivy**: 5Gi
### Security
- **Admin Password**: Generated secure password
- **Secret Key**: Generated secure key
- **TLS**: Enabled via cert-manager
## Next Steps
### 1. Create Harbor Project
1. Login to https://registry.bk.glpx.pro
2. Go to **Projects** → **New Project**
3. Create project: `turash`
4. Set as **Public** (or configure access as needed)
### 2. Configure Woodpecker
```bash
# Add registry to Woodpecker repository
woodpecker-cli repo registry add <repo-id> \
--hostname registry.bk.glpx.pro \
--username admin \
--password "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
# Add secrets
woodpecker-cli repo secret add <repo-id> \
--name docker_username \
--value admin
woodpecker-cli repo secret add <repo-id> \
--name docker_password \
--value "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### 3. Test Registry Access
```bash
# Login
docker login registry.bk.glpx.pro -u admin -p "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
# Pull test image
docker pull alpine:latest
# Tag for Harbor
docker tag alpine:latest registry.bk.glpx.pro/turash/test:latest
# Push to Harbor
docker push registry.bk.glpx.pro/turash/test:latest
```
### 4. Verify Kubernetes Integration
```bash
# Check image pull secret
kubectl get secret harbor-registry-secret -n turash
# Test pod can pull images
kubectl run test-pull --image=registry.bk.glpx.pro/turash/test:latest --rm -it --restart=Never
```
## Security Notes
⚠️ **IMPORTANT**:
- Passwords are stored in `harbor-secrets.yaml.template` (DO NOT COMMIT!)
- Change passwords regularly
- Use Kubernetes secrets for production
- Enable RBAC and access controls
- Configure retention policies
## Troubleshooting
### Can't Login to Harbor
- Check if Harbor core pods are running: `kubectl get pods -n harbor`
- Verify password in values: `helm get values harbor -n harbor | grep harborAdminPassword`
- Check logs: `kubectl logs -n harbor deployment/harbor-core`
### Can't Push/Pull Images
- Verify Docker login: `docker login registry.bk.glpx.pro`
- Check project exists and is accessible
- Verify network connectivity to registry
### Database Connection Issues
- Check PostgreSQL is running: `kubectl get pods -n data | grep postgres`
- Verify database exists: Connect to PostgreSQL and check for `harbor` database
- Check connection string in Harbor values

@@ -0,0 +1,113 @@
# Harbor Registry Setup Summary ✅
## Status: COMPLETE
### Harbor Registry
- ✅ **Deployed**: Running and accessible
- ✅ **URL**: https://registry.bk.glpx.pro
- ✅ **Database**: Connected to external PostgreSQL (`infra-postgres-rw`)
- ✅ **Storage**: Using `longhorn-fast` storage class
- ✅ **Admin Credentials**:
- Username: `admin`
- Password: `nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0=`
### Woodpecker CI/CD
- ✅ **Repository Added**: `SamyRai/turash`
- ✅ **Registry Configured**: `registry.bk.glpx.pro`
- ✅ **Secrets Configured**: `docker_username`, `docker_password`
- ✅ **Pipeline Ready**: `.woodpecker.yml` configured
### ArgoCD
- ✅ **Repository Connected**: `https://github.com/SamyRai/turash.git`
- ✅ **Image Pull Secret**: `harbor-registry-argocd` created
- ✅ **Service Accounts Updated**: All ArgoCD components can pull from Harbor
### Kubernetes
- ✅ **Namespace**: `turash` created
- ✅ **Image Pull Secret**: `harbor-registry-secret` in `turash` namespace
- ✅ **Service Account**: Default SA patched with image pull secret
- ✅ **Deployments**: Configured to use Harbor images
## Quick Access
### Harbor Web UI
```bash
# Open in browser
open https://registry.bk.glpx.pro
# Or login via CLI
docker login registry.bk.glpx.pro -u admin -p "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### Woodpecker
```bash
# View repository
woodpecker-cli repo show SamyRai/turash
# List pipelines
woodpecker-cli pipeline ls SamyRai/turash
# Trigger pipeline
woodpecker-cli pipeline start SamyRai/turash
```
### ArgoCD
```bash
# List applications
argocd app list
# Sync application
argocd app sync turash-backend
argocd app sync turash-frontend
```
## Next Steps
1. **Create Harbor Project**:
- Login to https://registry.bk.glpx.pro
- Create project: `turash`
- Set as public
2. **Test Pipeline**:
```bash
git push origin master
```
3. **Monitor Deployment**:
```bash
# Watch Woodpecker
woodpecker-cli pipeline logs SamyRai/turash <pipeline-number>
# Watch ArgoCD
argocd app get turash-backend
```
## Verification
### Check Harbor
```bash
kubectl get pods -n harbor
kubectl get ingress -n harbor
curl -k https://registry.bk.glpx.pro/api/v2.0/health
```
### Check Woodpecker
```bash
woodpecker-cli repo registry ls SamyRai/turash
woodpecker-cli repo secret ls SamyRai/turash
```
### Check ArgoCD
```bash
kubectl get secret harbor-registry-argocd -n argocd
argocd repo list
```
### Check Kubernetes
```bash
kubectl get secret harbor-registry-secret -n turash
kubectl get serviceaccount default -n turash -o yaml | grep imagePullSecrets
```
**Everything is configured and ready!** 🚀

@@ -0,0 +1,141 @@
# Woodpecker Repository Setup Guide
## Issue: Repository Not Showing in Sync UI
### Problem
When syncing repositories in Woodpecker UI, some repositories (especially private ones) may not appear due to:
1. **Pagination Limits**: GitHub API pagination limits (default 30-100 repos per page)
2. **OAuth Permissions**: Missing repository access permissions
3. **Repository ID Format**: Woodpecker requires numeric GitHub repository ID, not GraphQL node ID
### Solution: Add Repository Manually via CLI
#### Step 1: Get GitHub Repository ID
```bash
# Get numeric repository ID (not GraphQL node ID)
gh api repos/SamyRai/turash --jq '.id'
# Output: 1103592817
```
#### Step 2: Add Repository to Woodpecker
```bash
# Use numeric ID (not R_kgDOQcd9cQ format)
woodpecker-cli repo add 1103592817
```
#### Step 3: Verify Repository
```bash
# List repositories
woodpecker-cli repo ls | grep turash
# Show repository details
woodpecker-cli repo show SamyRai/turash
```
### Configure Harbor Registry
```bash
# Add Harbor registry
woodpecker-cli repo registry add SamyRai/turash \
--hostname registry.bk.glpx.pro \
--username admin \
--password "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### Configure Secrets
```bash
# Add Docker credentials
woodpecker-cli repo secret add SamyRai/turash \
--name docker_username \
--value admin
woodpecker-cli repo secret add SamyRai/turash \
--name docker_password \
--value "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
```
### Verify Configuration
```bash
# List registries
woodpecker-cli repo registry ls SamyRai/turash
# List secrets
woodpecker-cli repo secret ls SamyRai/turash
```
## Understanding Repository IDs
### GitHub Repository ID Formats
1. **Numeric ID** (What Woodpecker needs):
```bash
gh api repos/OWNER/REPO --jq '.id'
# Example: 1103592817
```
2. **GraphQL Node ID** (Not compatible):
```bash
gh repo view OWNER/REPO --json id
# Example: R_kgDOQcd9cQ
```
### Why Sync UI May Not Show All Repos
1. **Pagination**: GitHub API returns repositories in pages (default 30 per page)
2. **Private Repos**: May require additional OAuth scopes
3. **Rate Limiting**: Too many repos can hit API rate limits
4. **UI Filtering**: UI may filter by active/inactive status
### Best Practices
1. **Add Important Repos Manually**: Use CLI for critical repositories
2. **Check OAuth Scopes**: Ensure `repo` scope is granted for private repos
3. **Use Sync for Discovery**: Use UI sync to discover repos, then activate manually
4. **Monitor Active Status**: Only active repositories run pipelines
## Troubleshooting
### Repository Not Found (404)
```bash
# Check if repository exists in GitHub
gh repo view SamyRai/turash
# Verify numeric ID
gh api repos/SamyRai/turash --jq '.id'
# Try adding with numeric ID
woodpecker-cli repo add <numeric-id>
```
### Can't Add Registry/Secrets
```bash
# Verify repository is active
woodpecker-cli repo show SamyRai/turash | grep ACTIVE
# Check repository ID format
woodpecker-cli repo show SamyRai/turash | grep ID
```
### Sync UI Still Not Showing
1. **Refresh Browser**: Clear cache and refresh
2. **Check Permissions**: Verify GitHub OAuth token has `repo` scope
3. **Use CLI**: Add repositories manually via CLI (more reliable)
4. **Check Logs**: Review Woodpecker server logs for sync errors
## Current Status
**Repository Added**: `SamyRai/turash` (ID: 1103592817)
**Active**: Yes
**Registry Configured**: `registry.bk.glpx.pro`
**Secrets Configured**: `docker_username`, `docker_password`
**Repository is ready for CI/CD!** 🚀

@@ -0,0 +1,22 @@
# Harbor Secrets Template
# DO NOT COMMIT THIS FILE WITH ACTUAL VALUES!
# Copy to harbor-secrets.yaml and fill in values, then add to .gitignore
# Harbor Admin Credentials
HARBOR_ADMIN_USERNAME: "admin"
HARBOR_ADMIN_PASSWORD: "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
# Harbor Secret Key (for internal encryption)
HARBOR_SECRET_KEY: "TseX7sE/+7Luy2DHvE024/nqG1JaDt6usDNiW71ZV/8="
# Harbor Registry Endpoint
HARBOR_REGISTRY: "registry.bk.glpx.pro"
# Docker Registry Credentials (for Kubernetes image pulls)
DOCKER_USERNAME: "admin"
DOCKER_PASSWORD: "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
# Woodpecker CI/CD Secrets
WOODPECKER_DOCKER_USERNAME: "admin"
WOODPECKER_DOCKER_PASSWORD: "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="

@@ -0,0 +1,144 @@
# Harbor Configuration for k3s/containerd
# Optimized for production use with Woodpecker and ArgoCD
expose:
type: ingress
ingress:
className: traefik
hosts:
core: registry.bk.glpx.pro
notary: notary.bk.glpx.pro
annotations:
cert-manager.io/cluster-issuer: letsencrypt-prod
traefik.ingress.kubernetes.io/router.entrypoints: websecure
traefik.ingress.kubernetes.io/router.tls: "true"
tls:
enabled: true
certSource: secret
secret:
secretName: harbor-tls
notarySecretName: notary-tls
# External URL
externalURL: https://registry.bk.glpx.pro
# Persistence - using Longhorn Fast
persistence:
persistentVolumeClaim:
registry:
existingClaim: ""
storageClass: "longhorn-fast"
accessMode: ReadWriteOnce
size: 50Gi
chartmuseum:
existingClaim: ""
storageClass: "longhorn-fast"
accessMode: ReadWriteOnce
size: 5Gi
jobservice:
existingClaim: ""
storageClass: "longhorn-fast"
accessMode: ReadWriteOnce
size: 1Gi
# Database storage not needed - using external PostgreSQL
redis:
existingClaim: ""
storageClass: "longhorn-fast"
accessMode: ReadWriteOnce
size: 2Gi
trivy:
existingClaim: ""
storageClass: "longhorn-fast"
accessMode: ReadWriteOnce
size: 5Gi
# Harbor Core Configuration
harborAdminPassword: "nVbR0IZv02zdZaM1zqjOz8FVbdzmXlEUaOb59D5Bkz0="
secretKey: "TseX7sE/+7Luy2DHvE024/nqG1JaDt6usDNiW71ZV/8="
# Database - using external PostgreSQL from infra namespace
database:
type: external
external:
host: infra-postgres-rw.data.svc.cluster.local
port: "5432"
username: "app"
password: "RXMPNHkY/Pnh54xz825MjCukUgo1XTKDk4ehbrMEOx8="
coreDatabase: "harbor"
sslmode: "disable"
# Connection pool settings
maxIdleConns: 100
maxOpenConns: 900
# Redis - using internal Redis
redis:
type: internal
internal:
image:
repository: goharbor/redis-photon
tag: v2.11.0
# Registry Configuration (image tags aligned with the Harbor core version)
registry:
  registry:
    image:
      repository: goharbor/registry-photon
      tag: v2.11.0
  controller:
    image:
      repository: goharbor/harbor-registryctl
      tag: v2.11.0
# Trivy Scanner (vulnerability scanning)
trivy:
enabled: true
storage:
size: 5Gi
# Notary (content trust) was removed from Harbor in v2.6, so current charts
# ignore this block; use cosign/sigstore-based signing instead
notary:
  enabled: false
# Chartmuseum (Helm charts)
chartmuseum:
enabled: false # Disable if not using Helm charts
# Job Service
jobservice:
replicas: 1
maxJobWorkers: 10
# Core Service
core:
replicas: 2
image:
repository: goharbor/harbor-core
tag: v2.11.0
# Proxy Configuration
proxy:
noProxy: "127.0.0.1,localhost,.local,.internal,kubernetes.default.svc,.svc,.svc.cluster.local"
# Metrics
metrics:
enabled: true
core:
path: /metrics
port: 8001
registry:
path: /metrics
port: 8001
jobservice:
path: /metrics
port: 8001
exporter:
path: /metrics
port: 8001
# Logging
logLevel: info
# Update Strategy
updateStrategy:
type: RollingUpdate

43
k8s/secret.yaml.template Normal file
@@ -0,0 +1,43 @@
# Secret Template
# Copy this file to secret.yaml and fill in the actual values
# DO NOT commit secret.yaml to git!
#
# To create the secret (this file is a complete Secret manifest):
#   kubectl apply -f secret.yaml
#
# Or use sealed-secrets, external-secrets, or your preferred secret management solution
apiVersion: v1
kind: Secret
metadata:
name: turash-backend-secret
namespace: turash
labels:
app: turash-backend
component: secret
type: Opaque
stringData:
# JWT Secret - MUST be changed in production!
JWT_SECRET: "CHANGE_ME_TO_STRONG_RANDOM_SECRET"
# PostgreSQL Credentials
POSTGRES_USER: "turash"
POSTGRES_PASSWORD: "CHANGE_ME_STRONG_PASSWORD"
# Neo4j Credentials
NEO4J_PASSWORD: "CHANGE_ME_STRONG_PASSWORD"
# Redis Password (if required)
REDIS_PASSWORD: "CHANGE_ME_STRONG_PASSWORD"
# Ollama Credentials (optional)
OLLAMA_USERNAME: ""
OLLAMA_PASSWORD: ""
# Google Maps API Key (optional)
GOOGLE_KG_API_KEY: ""
GOOGLE_CLOUD_PROJECT_ID: ""

42
k8s/service.yaml Normal file
@@ -0,0 +1,42 @@
apiVersion: v1
kind: Service
metadata:
name: turash-backend
namespace: turash
labels:
app: turash-backend
component: backend
spec:
type: ClusterIP
ports:
- name: http
port: 80
targetPort: http
protocol: TCP
selector:
app: turash-backend
sessionAffinity: ClientIP
sessionAffinityConfig:
clientIP:
timeoutSeconds: 10800
---
apiVersion: v1
kind: Service
metadata:
name: turash-backend-headless
namespace: turash
labels:
app: turash-backend
component: backend
spec:
type: ClusterIP
clusterIP: None
ports:
- name: http
port: 8080
targetPort: http
protocol: TCP
selector:
app: turash-backend