turash/bugulma/backend/internal/repository/trust_metrics_repository.go

415 lines
13 KiB
Go

package repository
import (
"bugulma/backend/internal/domain"
"context"
"math"
"time"
"gorm.io/gorm"
)
// TrustMetricsRepository implements domain.TrustMetricsRepository with GORM,
// delegating generic CRUD/query operations to the embedded BaseRepository.
type TrustMetricsRepository struct {
	*BaseRepository[domain.TrustMetrics]
}
// NewTrustMetricsRepository constructs a GORM-backed trust metrics repository
// that satisfies domain.TrustMetricsRepository.
func NewTrustMetricsRepository(db *gorm.DB) domain.TrustMetricsRepository {
	base := NewBaseRepository[domain.TrustMetrics](db)
	return &TrustMetricsRepository{BaseRepository: base}
}
// GetByOrganization returns every trust metric recorded for the organization.
func (r *TrustMetricsRepository) GetByOrganization(ctx context.Context, orgID string) ([]*domain.TrustMetrics, error) {
	const byOrg = "organization_id = ?"
	return r.FindWhereWithContext(ctx, byOrg, orgID)
}
// GetByType returns the organization's trust metrics of one specific type.
func (r *TrustMetricsRepository) GetByType(ctx context.Context, orgID, metricType string) ([]*domain.TrustMetrics, error) {
	const byOrgAndType = "organization_id = ? AND metric_type = ?"
	return r.FindWhereWithContext(ctx, byOrgAndType, orgID, metricType)
}
// GetLatestProfile assembles a comprehensive data quality profile for an
// organization from its stored trust metrics over a fixed 30-day window.
//
// Completeness and Accuracy are derived from the metrics; Timeliness and
// Consistency are left at their zero values. NOTE(review): those two zero
// fields still carry 0.1 weight each in OverallScore, which caps the overall
// score at 0.8 — confirm this is intended.
func (r *TrustMetricsRepository) GetLatestProfile(ctx context.Context, orgID string) (*domain.DataQualityProfile, error) {
	metrics, err := r.GetByOrganization(ctx, orgID)
	if err != nil {
		return nil, err
	}

	// Flatten the pointer slice into values for embedding in the profile.
	values := make([]domain.TrustMetrics, len(metrics))
	for i := range metrics {
		values[i] = *metrics[i]
	}

	profile := &domain.DataQualityProfile{
		OrganizationID:   orgID,
		LastAssessed:     time.Now(),
		AssessmentPeriod: 30 * 24 * time.Hour, // assessment window: 30 days
		Metrics:          values,
		Completeness:     r.calculateCompletenessScore(metrics),
		Accuracy:         r.calculateAccuracyScore(metrics),
	}

	// Weighted blend: completeness and accuracy dominate (0.4 each);
	// timeliness and consistency contribute 0.1 each.
	profile.OverallScore = profile.Completeness*0.4 +
		profile.Accuracy*0.4 +
		profile.Timeliness*0.1 +
		profile.Consistency*0.1

	profile.Recommendations = r.generateRecommendations(profile)
	return profile, nil
}
// calculateCompletenessScore estimates how complete the organization's data is,
// returning a value in [0, 1].
//
// If an explicit organization-level "data_completeness" metric exists, its
// value is returned as-is. Otherwise the score is approximated from the number
// of recorded metrics, assuming 10 key metrics constitute a complete profile.
func (r *TrustMetricsRepository) calculateCompletenessScore(metrics []*domain.TrustMetrics) float64 {
	if len(metrics) == 0 {
		return 0.0
	}
	// Prefer an explicitly recorded completeness metric when one is available.
	for _, metric := range metrics {
		if metric.MetricName == "data_completeness" && metric.MetricType == "organization" {
			return metric.Value
		}
	}
	// Fallback: fraction of an assumed 10 key metrics, clamped to 1.0 with
	// math.Min instead of the hand-rolled comparison.
	return math.Min(float64(len(metrics))/10.0, 1.0)
}
// calculateAccuracyScore averages the values of all accuracy-related metrics
// ("data_accuracy" and "verification_score"). When no such metrics exist it
// falls back to a neutral 0.5.
func (r *TrustMetricsRepository) calculateAccuracyScore(metrics []*domain.TrustMetrics) float64 {
	var (
		sum   float64
		count int
	)
	for _, m := range metrics {
		switch m.MetricName {
		case "data_accuracy", "verification_score":
			sum += m.Value
			count++
		}
	}
	if count == 0 {
		return 0.5 // no accuracy data recorded: neutral midpoint
	}
	return sum / float64(count)
}
// generateRecommendations turns weak profile scores into actionable advice;
// when every score is healthy it returns a single positive maintenance note.
func (r *TrustMetricsRepository) generateRecommendations(profile *domain.DataQualityProfile) []string {
	var recs []string
	add := func(msg string) { recs = append(recs, msg) }

	if profile.Completeness < 0.7 {
		add("Complete missing profile information to improve trust scores")
	}
	if profile.Accuracy < 0.8 {
		add("Verify and update outdated information")
	}
	if profile.OverallScore < 0.6 {
		add("Consider third-party verification to build credibility")
	}
	if len(recs) == 0 {
		add("Profile quality is good - continue maintaining data accuracy")
	}
	return recs
}
// VerifiedDataRepository implements persistence operations for verified data
// records, delegating generic CRUD/query operations to BaseRepository.
type VerifiedDataRepository struct {
	*BaseRepository[domain.VerifiedData]
}
// NewVerifiedDataRepository constructs a GORM-backed verified data repository.
func NewVerifiedDataRepository(db *gorm.DB) *VerifiedDataRepository {
	base := NewBaseRepository[domain.VerifiedData](db)
	return &VerifiedDataRepository{BaseRepository: base}
}
// GetByOrganization returns all verified data records for an organization.
func (r *VerifiedDataRepository) GetByOrganization(ctx context.Context, orgID string) ([]*domain.VerifiedData, error) {
	const byOrg = "organization_id = ?"
	return r.FindWhereWithContext(ctx, byOrg, orgID)
}
// GetByStatus returns all verified data records in the given verification status.
func (r *VerifiedDataRepository) GetByStatus(ctx context.Context, status domain.VerificationStatus) ([]*domain.VerifiedData, error) {
	const byStatus = "status = ?"
	return r.FindWhereWithContext(ctx, byStatus, status)
}
// HistoricalSuccessRepository implements persistence operations for historical
// success metrics, delegating generic CRUD/query operations to BaseRepository.
type HistoricalSuccessRepository struct {
	*BaseRepository[domain.HistoricalSuccess]
}
// NewHistoricalSuccessRepository constructs a GORM-backed repository for
// historical success metrics.
func NewHistoricalSuccessRepository(db *gorm.DB) *HistoricalSuccessRepository {
	base := NewBaseRepository[domain.HistoricalSuccess](db)
	return &HistoricalSuccessRepository{BaseRepository: base}
}
// GetByOrganization returns all historical success metrics for an organization.
func (r *HistoricalSuccessRepository) GetByOrganization(ctx context.Context, orgID string) ([]*domain.HistoricalSuccess, error) {
	const byOrg = "organization_id = ?"
	return r.FindWhereWithContext(ctx, byOrg, orgID)
}
// GetByMetricType returns the organization's historical metrics of one type.
func (r *HistoricalSuccessRepository) GetByMetricType(ctx context.Context, orgID, metricType string) ([]*domain.HistoricalSuccess, error) {
	const byOrgAndType = "organization_id = ? AND metric_type = ?"
	return r.FindWhereWithContext(ctx, byOrgAndType, orgID, metricType)
}
// CalculateTrustScore aggregates trust metrics and historical success data
// into an overall trust score for the organization, including a per-component
// breakdown, a confidence level, risk factors, and recommendations.
//
// Component weights: data quality 0.3, verification 0.3, historical 0.3,
// peer review 0.1.
func (r *HistoricalSuccessRepository) CalculateTrustScore(ctx context.Context, orgID string) (*domain.TrustScore, error) {
	// Load trust metrics through a sibling repository sharing the same DB handle.
	metrics, err := NewTrustMetricsRepository(r.DB()).GetByOrganization(ctx, orgID)
	if err != nil {
		return nil, err
	}
	historical, err := r.GetByOrganization(ctx, orgID)
	if err != nil {
		return nil, err
	}

	score := &domain.TrustScore{
		OrganizationID: orgID,
		LastCalculated: time.Now(),
		ScoreBreakdown: make(map[string]float64, 4),
	}

	score.DataQualityScore = calculateDataQualityScore(metrics)
	score.VerificationScore = calculateVerificationScore(metrics)
	score.HistoricalScore = calculateHistoricalScore(historical)
	// Peer review is inferred from historical completion/satisfaction figures;
	// organizations with successful matches score better.
	score.PeerReviewScore = calculatePeerReviewScore(historical, metrics)

	score.ScoreBreakdown["data_quality"] = score.DataQualityScore
	score.ScoreBreakdown["verification"] = score.VerificationScore
	score.ScoreBreakdown["historical"] = score.HistoricalScore
	score.ScoreBreakdown["peer_review"] = score.PeerReviewScore

	// Weighted average of the four components.
	score.OverallScore = score.DataQualityScore*0.3 +
		score.VerificationScore*0.3 +
		score.HistoricalScore*0.3 +
		score.PeerReviewScore*0.1

	// Confidence grows with the number of available data points.
	score.ConfidenceLevel = calculateConfidenceLevel(metrics, historical)
	score.RiskFactors = identifyRiskFactors(score)
	score.Recommendations = generateTrustRecommendations(score)
	return score, nil
}
// Helper functions for trust score calculation

// calculateDataQualityScore averages the values of metrics whose type is
// "organization" or "resource_flow"; it returns a low default of 0.3 when no
// such metrics are recorded.
func calculateDataQualityScore(metrics []*domain.TrustMetrics) float64 {
	if len(metrics) == 0 {
		return 0.3 // low default: nothing recorded at all
	}
	var sum float64
	var n int
	for _, m := range metrics {
		switch m.MetricType {
		case "organization", "resource_flow":
			sum += m.Value
			n++
		}
	}
	if n == 0 {
		return 0.3
	}
	return sum / float64(n)
}
// calculateVerificationScore returns the first recorded "verification_score"
// metric value, or 0.0 when the organization has no verification data.
func calculateVerificationScore(metrics []*domain.TrustMetrics) float64 {
	for _, m := range metrics {
		if m.MetricName == "verification_score" {
			return m.Value
		}
	}
	return 0.0 // no verification data
}
// calculateHistoricalScore averages "completion_rate" metrics; organizations
// with no history (or no completion-rate records) get a neutral 0.4.
func calculateHistoricalScore(historical []*domain.HistoricalSuccess) float64 {
	const neutral = 0.4 // default for new organizations
	if len(historical) == 0 {
		return neutral
	}
	var sum float64
	var n int
	for _, record := range historical {
		if record.MetricType != "completion_rate" {
			continue
		}
		sum += record.Value
		n++
	}
	if n == 0 {
		return neutral
	}
	return sum / float64(n)
}
// calculateConfidenceLevel maps the total number of available data points to a
// coarse confidence bucket: fewer than 5 → 0.3, 5-14 → 0.6, 15 or more → 0.9.
func calculateConfidenceLevel(metrics []*domain.TrustMetrics, historical []*domain.HistoricalSuccess) float64 {
	points := len(metrics) + len(historical)
	switch {
	case points < 5:
		return 0.3
	case points < 15:
		return 0.6
	default:
		return 0.9
	}
}
// identifyRiskFactors flags weak trust-score components as risk factors, each
// with a severity, impact level, and suggested mitigation.
func identifyRiskFactors(trustScore *domain.TrustScore) []domain.RiskFactor {
	// Table of threshold checks paired with the risk they imply.
	type check struct {
		failing bool
		factor  domain.RiskFactor
	}
	checks := []check{
		{
			failing: trustScore.DataQualityScore < 0.5,
			factor: domain.RiskFactor{
				Type:        "data_quality",
				Description: "Low data quality score indicates incomplete or inaccurate information",
				Severity:    0.7,
				Impact:      "high",
				Mitigation:  "Complete profile information and verify data accuracy",
			},
		},
		{
			failing: trustScore.VerificationScore < 0.3,
			factor: domain.RiskFactor{
				Type:        "verification",
				Description: "Limited verification reduces trustworthiness",
				Severity:    0.6,
				Impact:      "medium",
				Mitigation:  "Pursue third-party verification or certification",
			},
		},
		{
			failing: trustScore.HistoricalScore < 0.4,
			factor: domain.RiskFactor{
				Type:        "historical",
				Description: "Limited historical performance data",
				Severity:    0.4,
				Impact:      "low",
				Mitigation:  "Build track record through successful partnerships",
			},
		},
	}

	var risks []domain.RiskFactor
	for _, c := range checks {
		if c.failing {
			risks = append(risks, c.factor)
		}
	}
	return risks
}
// generateTrustRecommendations turns weak trust-score components into
// actionable advice; a strong profile gets a single maintenance note.
func generateTrustRecommendations(trustScore *domain.TrustScore) []string {
	var recs []string
	if trustScore.OverallScore < 0.5 {
		recs = append(recs,
			"Focus on improving data quality and verification to build trust",
			"Consider obtaining industry certifications or third-party verification")
	}
	if trustScore.DataQualityScore < 0.7 {
		recs = append(recs, "Complete all profile information and keep data current")
	}
	if trustScore.VerificationScore < 0.5 {
		recs = append(recs, "Pursue verification for key data elements")
	}
	if trustScore.ConfidenceLevel < 0.6 {
		recs = append(recs, "Build more comprehensive data profile to increase confidence")
	}
	if len(recs) == 0 {
		recs = append(recs, "Trust profile is strong - maintain current standards")
	}
	return recs
}
// calculatePeerReviewScore infers peer satisfaction from historical success
// metrics: average completion rate carries 0.6 weight, explicit satisfaction
// metrics carry 0.4, and organizations with more than 10 historical records
// earn a 10% boost. The result is clamped to [0, 1].
//
// NOTE(review): the metrics parameter is currently unused; it is kept for
// signature stability — confirm whether trust metrics should factor in here.
func calculatePeerReviewScore(historical []*domain.HistoricalSuccess, metrics []*domain.TrustMetrics) float64 {
	if len(historical) == 0 {
		return 0.4 // neutral default for new organizations
	}

	var completionSum, satisfactionSum float64
	var completionN, satisfactionN int
	for _, record := range historical {
		switch record.MetricType {
		case "completion_rate":
			completionSum += record.Value
			completionN++
		case "satisfaction":
			satisfactionSum += record.Value
			satisfactionN++
		}
	}

	// Completion component (weight 0.6), defaulting to 0.4 when absent.
	completion := 0.4
	if completionN > 0 {
		avg := completionSum / float64(completionN)
		completion = avg * 0.6
	}
	// Satisfaction component (weight 0.4), defaulting to 0.3 when absent.
	satisfaction := 0.3
	if satisfactionN > 0 {
		avg := satisfactionSum / float64(satisfactionN)
		satisfaction = avg * 0.4
	}

	score := completion + satisfaction
	// Reward a substantial track record with a capped 10% boost.
	if len(historical) > 10 {
		score = math.Min(1.0, score*1.1)
	}
	return math.Max(0.0, math.Min(1.0, score))
}