feat: refactor the scheduled task and adjust the key validation strategy
@@ -1,8 +1,8 @@
 package services
 
 import (
-	"context"
 	"gpt-load/internal/config"
 	"gpt-load/internal/keypool"
 	"gpt-load/internal/models"
 	"sync"
 	"time"
@@ -11,11 +11,11 @@ import (
 	"gorm.io/gorm"
 )
 
-// KeyCronService is responsible for periodically submitting keys for validation.
+// KeyCronService is responsible for periodically validating invalid keys.
 type KeyCronService struct {
 	DB              *gorm.DB
 	SettingsManager *config.SystemSettingsManager
-	Pool            *KeyValidationPool
 	Validator       *keypool.KeyValidator
 	LeaderService   *LeaderService
 	stopChan        chan struct{}
 	wg              sync.WaitGroup
@@ -25,13 +25,13 @@ type KeyCronService struct {
 func NewKeyCronService(
 	db *gorm.DB,
 	settingsManager *config.SystemSettingsManager,
-	pool *KeyValidationPool,
 	validator *keypool.KeyValidator,
 	leaderService *LeaderService,
 ) *KeyCronService {
 	return &KeyCronService{
 		DB:              db,
 		SettingsManager: settingsManager,
-		Pool:            pool,
 		Validator:       validator,
 		LeaderService:   leaderService,
 		stopChan:        make(chan struct{}),
 	}
@@ -77,7 +77,7 @@ func (s *KeyCronService) runLoop() {
 	}
 }
 
-// submitValidationJobs finds groups and keys that need validation and submits them to the pool.
+// submitValidationJobs finds groups whose keys need validation and validates them.
 func (s *KeyCronService) submitValidationJobs() {
 	var groups []models.Group
 	if err := s.DB.Find(&groups).Error; err != nil {
@@ -86,61 +86,47 @@ func (s *KeyCronService) submitValidationJobs() {
 	}
 
 	validationStartTime := time.Now()
-	groupsToUpdateTimestamp := make(map[uint]*models.Group)
 
-	total := 0
 	for i := range groups {
 		group := &groups[i]
 		effectiveSettings := s.SettingsManager.GetEffectiveConfig(group.Config)
 		interval := time.Duration(effectiveSettings.KeyValidationIntervalMinutes) * time.Minute
 
 		if group.LastValidatedAt == nil || validationStartTime.Sub(*group.LastValidatedAt) > interval {
-			groupsToUpdateTimestamp[group.ID] = group
-			var keys []models.APIKey
-			if err := s.DB.Where("group_id = ?", group.ID).Find(&keys).Error; err != nil {
-				logrus.Errorf("KeyCronService: Failed to get keys for group %s: %v", group.Name, err)
+			groupProcessStart := time.Now()
+			var invalidKeys []models.APIKey
+			err := s.DB.Where("group_id = ? AND status = ?", group.ID, models.KeyStatusInvalid).Find(&invalidKeys).Error
+			if err != nil {
+				logrus.Errorf("KeyCronService: Failed to get invalid keys for group %s: %v", group.Name, err)
 				continue
 			}
 
-			lenKey := len(keys)
-			if lenKey == 0 {
-				continue
-			}
-
-			total += lenKey
-
-			if lenKey > 0 {
-				logrus.Infof("KeyCronService: Submitting %d keys for group %s for validation.", lenKey, group.Name)
-			}
-
-			for _, key := range keys {
-				ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
-
-				job := ValidationJob{
-					Key:        key,
-					Group:      group,
-					Ctx:        ctx,
-					CancelFunc: cancel,
+			validatedCount := len(invalidKeys)
+			becameValidCount := 0
+			if validatedCount > 0 {
+				logrus.Debugf("KeyCronService: Found %d invalid keys to validate for group %s.", validatedCount, group.Name)
+				for j := range invalidKeys {
+					key := &invalidKeys[j]
+					isValid, _ := s.Validator.ValidateSingleKey(key, group)
+					if isValid {
+						becameValidCount++
+					}
 				}
+			}
 
-				s.Pool.SubmitJob(job)
+			if err := s.DB.Model(group).Update("last_validated_at", time.Now()).Error; err != nil {
+				logrus.Errorf("KeyCronService: Failed to update last_validated_at for group %s: %v", group.Name, err)
 			}
+
+			duration := time.Since(groupProcessStart)
+			logrus.Infof(
+				"KeyCronService: Group '%s' validation finished. Total checked: %d, became valid: %d. Duration: %s.",
+				group.Name,
+				validatedCount,
+				becameValidCount,
+				duration.String(),
+			)
 		}
 	}
-
-	// Update timestamps for all groups that were due for validation
-	if len(groupsToUpdateTimestamp) > 0 {
-		s.updateGroupTimestamps(groupsToUpdateTimestamp, validationStartTime)
-	}
-}
-
-func (s *KeyCronService) updateGroupTimestamps(groups map[uint]*models.Group, validationStartTime time.Time) {
-	var groupIDs []uint
-	for id := range groups {
-		groupIDs = append(groupIDs, id)
-	}
-	if err := s.DB.Model(&models.Group{}).Where("id IN ?", groupIDs).Update("last_validated_at", validationStartTime).Error; err != nil {
-		logrus.Errorf("KeyCronService: Failed to batch update last_validated_at for groups: %v", err)
-	}
 }
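Note on the hunk above: the cron no longer fans every key out to a shared worker pool; it loads only keys already marked invalid, revalidates them inline, and stamps `last_validated_at` per group instead of in one batch update. The per-group scheduling gate is unchanged. A minimal, runnable sketch of that gate, using a simplified stand-in for `models.Group` (the real model has more fields):

```go
package main

import (
	"fmt"
	"time"
)

// group is a simplified stand-in for models.Group.
type group struct {
	Name            string
	LastValidatedAt *time.Time
}

// dueForValidation mirrors the cron's check: a group is validated when it
// has never been validated, or when the configured interval has elapsed.
func dueForValidation(g *group, now time.Time, interval time.Duration) bool {
	return g.LastValidatedAt == nil || now.Sub(*g.LastValidatedAt) > interval
}

func main() {
	past := time.Now().Add(-45 * time.Minute)
	g := &group{Name: "openai", LastValidatedAt: &past}
	fmt.Println(dueForValidation(g, time.Now(), 30*time.Minute)) // true: 45m > 30m
}
```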
@@ -1,7 +1,6 @@
 package services
 
 import (
-	"context"
 	"fmt"
 	"gpt-load/internal/config"
 	"gpt-load/internal/keypool"
@@ -71,12 +70,8 @@ func (s *KeyManualValidationService) runValidation(group *models.Group, keys []m
 	jobs := make(chan models.APIKey, len(keys))
 	results := make(chan bool, len(keys))
 
-	performanceConfig := s.ConfigManager.GetPerformanceConfig()
-	concurrency := performanceConfig.KeyValidationPoolSize
-
-	if concurrency <= 0 {
-		concurrency = 10
-	}
+	// Fixed concurrency of 10 to avoid exceeding upstream rate limits
+	concurrency := 10
 
 	var wg sync.WaitGroup
 	for range concurrency {
@@ -136,7 +131,7 @@ func (s *KeyManualValidationService) runValidation(group *models.Group, keys []m
 func (s *KeyManualValidationService) validationWorker(wg *sync.WaitGroup, group *models.Group, jobs <-chan models.APIKey, results chan<- bool) {
 	defer wg.Done()
 	for key := range jobs {
-		isValid, _ := s.Validator.ValidateSingleKey(context.Background(), &key, group)
+		isValid, _ := s.Validator.ValidateSingleKey(&key, group)
 		results <- isValid
 	}
 }
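The manual validation path now ignores the `KeyValidationPoolSize` setting and always runs ten workers. The underlying pattern, buffered job and result channels drained by a fixed set of goroutines, works standalone; in this sketch `validate` is a hypothetical stand-in for `Validator.ValidateSingleKey`:

```go
package main

import (
	"fmt"
	"strings"
	"sync"
)

// validate is a toy stand-in for the real key validator.
func validate(key string) bool { return strings.HasPrefix(key, "sk-") }

func main() {
	keys := []string{"sk-a", "bad", "sk-b", "sk-c"}
	jobs := make(chan string, len(keys))
	results := make(chan bool, len(keys))

	const concurrency = 10 // fixed, regardless of any pool-size config
	var wg sync.WaitGroup
	for range concurrency { // Go 1.22+ range-over-int, as in the diff
		wg.Add(1)
		go func() {
			defer wg.Done()
			for k := range jobs { // drain until the channel is closed
				results <- validate(k)
			}
		}()
	}

	for _, k := range keys {
		jobs <- k
	}
	close(jobs) // lets workers exit their range loops
	wg.Wait()
	close(results)

	valid := 0
	for ok := range results {
		if ok {
			valid++
		}
	}
	fmt.Printf("%d/%d keys valid\n", valid, len(keys))
}
```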
@@ -1,7 +1,6 @@
 package services
 
 import (
-	"context"
 	"encoding/json"
 	"fmt"
 	"gpt-load/internal/keypool"
@@ -282,7 +281,7 @@ func (s *KeyService) ListKeysInGroupQuery(groupID uint, statusFilter string, sea
 }
 
 // TestMultipleKeys handles a one-off validation test for multiple keys.
-func (s *KeyService) TestMultipleKeys(ctx context.Context, group *models.Group, keysText string) ([]keypool.KeyTestResult, error) {
+func (s *KeyService) TestMultipleKeys(group *models.Group, keysText string) ([]keypool.KeyTestResult, error) {
 	keysToTest := s.ParseKeysFromText(keysText)
 	if len(keysToTest) > maxRequestKeys {
 		return nil, fmt.Errorf("batch size exceeds the limit of %d keys, got %d", maxRequestKeys, len(keysToTest))
@@ -298,7 +297,7 @@ func (s *KeyService) TestMultipleKeys(ctx context.Context, group *models.Group,
 			end = len(keysToTest)
 		}
 		chunk := keysToTest[i:end]
-		results, err := s.KeyValidator.TestMultipleKeys(ctx, group, chunk)
+		results, err := s.KeyValidator.TestMultipleKeys(group, chunk)
 		if err != nil {
 			return nil, err
 		}
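`TestMultipleKeys` still splits the parsed keys into fixed-size chunks before handing each one to `KeyValidator.TestMultipleKeys`; only the `ctx` parameter is gone. The clamp-at-the-end slicing pattern in isolation (`chunkSize` here is an arbitrary illustrative value, not the service's actual batch size):

```go
package main

import "fmt"

func main() {
	keysToTest := []string{"k1", "k2", "k3", "k4", "k5"}
	const chunkSize = 2 // illustrative only

	for i := 0; i < len(keysToTest); i += chunkSize {
		end := i + chunkSize
		if end > len(keysToTest) {
			end = len(keysToTest) // clamp the final partial chunk
		}
		chunk := keysToTest[i:end]
		fmt.Println(chunk) // [k1 k2], [k3 k4], [k5]
	}
}
```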
@@ -1,93 +0,0 @@
-package services
-
-import (
-	"context"
-	"gpt-load/internal/keypool"
-	"gpt-load/internal/models"
-	"gpt-load/internal/types"
-	"sync"
-
-	"github.com/sirupsen/logrus"
-)
-
-// ValidationJob represents a single key validation task for the worker pool.
-type ValidationJob struct {
-	TaskID     string
-	Key        models.APIKey
-	Group      *models.Group
-	Ctx        context.Context
-	CancelFunc context.CancelFunc
-}
-
-// KeyValidationPool manages a global worker pool for key validation.
-type KeyValidationPool struct {
-	validator     *keypool.KeyValidator
-	configManager types.ConfigManager
-	jobs          chan ValidationJob
-	stopChan      chan struct{}
-	wg            sync.WaitGroup
-}
-
-// NewKeyValidationPool creates a new KeyValidationPool.
-func NewKeyValidationPool(validator *keypool.KeyValidator, configManager types.ConfigManager) *KeyValidationPool {
-	return &KeyValidationPool{
-		validator:     validator,
-		configManager: configManager,
-		jobs:          make(chan ValidationJob, 1024),
-		stopChan:      make(chan struct{}),
-	}
-}
-
-// Start initializes and runs the worker pool.
-func (p *KeyValidationPool) Start() {
-	performanceConfig := p.configManager.GetPerformanceConfig()
-	concurrency := performanceConfig.KeyValidationPoolSize
-	if concurrency <= 0 {
-		concurrency = 10
-	}
-
-	logrus.Infof("Starting KeyValidationPool with %d workers...", concurrency)
-
-	p.wg.Add(concurrency)
-	for range concurrency {
-		go p.worker()
-	}
-}
-
-// Stop gracefully stops the worker pool.
-func (p *KeyValidationPool) Stop() {
-	logrus.Info("Stopping KeyValidationPool...")
-	close(p.stopChan)
-	close(p.jobs)
-	p.wg.Wait()
-
-	logrus.Info("KeyValidationPool stopped.")
-}
-
-// worker is a single goroutine that processes jobs.
-func (p *KeyValidationPool) worker() {
-	defer p.wg.Done()
-	for {
-		select {
-		case job, ok := <-p.jobs:
-			if !ok {
-				return
-			}
-			ctx := job.Ctx
-			if ctx == nil {
-				ctx = context.Background()
-			}
-			p.validator.ValidateSingleKey(ctx, &job.Key, job.Group)
-			if job.CancelFunc != nil {
-				job.CancelFunc()
-			}
-		case <-p.stopChan:
-			return
-		}
-	}
-}
-
-// SubmitJob adds a new validation job to the pool.
-func (p *KeyValidationPool) SubmitJob(job ValidationJob) {
-	p.jobs <- job
-}
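The deleted pool carried a per-job `context.Context` plus `CancelFunc`, released after each validation; with `ValidateSingleKey` no longer taking a context, the whole mechanism became dead weight. For reference, the per-job timeout pattern it relied on, reduced to a self-contained sketch (the `time.After` branch stands in for a validation call):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

func main() {
	// Each pool job created its own 10-second deadline like this.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel() // always release the context's resources

	select {
	case <-time.After(50 * time.Millisecond): // stand-in for a validation call
		fmt.Println("validated before the deadline")
	case <-ctx.Done():
		fmt.Println("validation timed out:", ctx.Err())
	}
}
```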