feat: 前端搭建-未完成 (frontend scaffolding, not yet complete)
14
internal/channel/channel.go
Normal file
@@ -0,0 +1,14 @@
package channel

import (
    "gpt-load/internal/models"

    "github.com/gin-gonic/gin"
)

// ChannelProxy defines the interface for different API channel proxies.
type ChannelProxy interface {
    // Handle takes a context, an API key, and the original request,
    // then forwards the request to the upstream service.
    Handle(c *gin.Context, apiKey *models.APIKey, group *models.Group)
}
18
internal/channel/factory.go
Normal file
@@ -0,0 +1,18 @@
package channel

import (
    "fmt"
    "gpt-load/internal/models"
)

// GetChannel returns a channel proxy based on the group's channel type.
func GetChannel(group *models.Group) (ChannelProxy, error) {
    switch group.ChannelType {
    case "openai":
        return NewOpenAIChannel(group)
    case "gemini":
        return NewGeminiChannel(group)
    default:
        return nil, fmt.Errorf("unsupported channel type: %s", group.ChannelType)
    }
}
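A minimal usage sketch of the factory and the ChannelProxy interface together. This caller is hypothetical and not part of the commit, but the signatures match the code above.

package main

import (
    "net/http"

    "github.com/gin-gonic/gin"

    "gpt-load/internal/channel"
    "gpt-load/internal/models"
)

// forwardThroughChannel is a hypothetical caller: it resolves the group's
// channel implementation via the factory and delegates the request to it.
func forwardThroughChannel(c *gin.Context, group *models.Group, apiKey *models.APIKey) {
    proxyChannel, err := channel.GetChannel(group)
    if err != nil {
        c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
        return
    }
    // The channel implementation rewrites the request and streams the upstream response back.
    proxyChannel.Handle(c, apiKey, group)
}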
55
internal/channel/gemini_channel.go
Normal file
@@ -0,0 +1,55 @@
package channel

import (
    "gpt-load/internal/models"
    "net/http"
    "net/http/httputil"
    "net/url"

    "github.com/gin-gonic/gin"
    "github.com/sirupsen/logrus"
)

const GeminiBaseURL = "https://generativelanguage.googleapis.com"

type GeminiChannel struct {
    BaseURL *url.URL
}

func NewGeminiChannel(group *models.Group) (*GeminiChannel, error) {
    baseURL, err := url.Parse(GeminiBaseURL)
    if err != nil {
        return nil, err // Should not happen with a constant
    }
    return &GeminiChannel{BaseURL: baseURL}, nil
}

func (ch *GeminiChannel) Handle(c *gin.Context, apiKey *models.APIKey, group *models.Group) {
    proxy := httputil.NewSingleHostReverseProxy(ch.BaseURL)

    proxy.Director = func(req *http.Request) {
        req.URL.Scheme = ch.BaseURL.Scheme
        req.URL.Host = ch.BaseURL.Host
        req.URL.Path = c.Param("path")
        req.Host = ch.BaseURL.Host

        // The Gemini API key is passed as a query parameter; attach it to
        // RawQuery (preserving any client-supplied parameters) rather than
        // embedding "?key=..." in the path, where it would be escaped.
        query := req.URL.Query()
        query.Set("key", apiKey.KeyValue)
        req.URL.RawQuery = query.Encode()

        // Remove the Authorization header if it was passed by the client
        req.Header.Del("Authorization")
    }

    proxy.ModifyResponse = func(resp *http.Response) error {
        // Log the response, etc.
        return nil
    }

    proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
        logrus.Errorf("Proxy error to Gemini: %v", err)
        // Handle error, maybe update key status
    }

    proxy.ServeHTTP(c.Writer, c.Request)
}
55
internal/channel/openai_channel.go
Normal file
@@ -0,0 +1,55 @@
package channel

import (
    "encoding/json"
    "gpt-load/internal/models"
    "net/http"
    "net/http/httputil"
    "net/url"

    "github.com/gin-gonic/gin"
    "github.com/sirupsen/logrus"
)

type OpenAIChannel struct {
    BaseURL *url.URL
}

type OpenAIChannelConfig struct {
    BaseURL string `json:"base_url"`
}

func NewOpenAIChannel(group *models.Group) (*OpenAIChannel, error) {
    var config OpenAIChannelConfig
    if err := json.Unmarshal([]byte(group.Config), &config); err != nil {
        return nil, err
    }
    baseURL, err := url.Parse(config.BaseURL)
    if err != nil {
        return nil, err
    }
    return &OpenAIChannel{BaseURL: baseURL}, nil
}

func (ch *OpenAIChannel) Handle(c *gin.Context, apiKey *models.APIKey, group *models.Group) {
    proxy := httputil.NewSingleHostReverseProxy(ch.BaseURL)
    proxy.Director = func(req *http.Request) {
        req.URL.Scheme = ch.BaseURL.Scheme
        req.URL.Host = ch.BaseURL.Host
        req.URL.Path = c.Param("path")
        req.Host = ch.BaseURL.Host
        req.Header.Set("Authorization", "Bearer "+apiKey.KeyValue)
    }

    proxy.ModifyResponse = func(resp *http.Response) error {
        // Log the response, etc.
        return nil
    }

    proxy.ErrorHandler = func(w http.ResponseWriter, r *http.Request, err error) {
        logrus.Errorf("Proxy error: %v", err)
        // Handle error, maybe update key status
    }

    proxy.ServeHTTP(c.Writer, c.Request)
}
@@ -55,6 +55,15 @@ type Config struct {

// NewManager creates a new configuration manager
func NewManager() (types.ConfigManager, error) {
    manager := &Manager{}
    if err := manager.ReloadConfig(); err != nil {
        return nil, err
    }
    return manager, nil
}

// ReloadConfig reloads the configuration from environment variables
func (m *Manager) ReloadConfig() error {
    // Try to load .env file
    if err := godotenv.Load(); err != nil {
        logrus.Info("Info: Create .env file to support environment variable configuration")
@@ -104,15 +113,17 @@ func NewManager() (types.ConfigManager, error) {
            EnableRequest: parseBoolean(os.Getenv("LOG_ENABLE_REQUEST"), true),
        },
    }

    manager := &Manager{config: config}
    m.config = config

    // Validate configuration
    if err := manager.Validate(); err != nil {
        return nil, err
    if err := m.Validate(); err != nil {
        return err
    }

    return manager, nil
    logrus.Info("Configuration reloaded successfully")
    m.DisplayConfig()

    return nil
}

// GetServerConfig returns server configuration
62
internal/db/database.go
Normal file
@@ -0,0 +1,62 @@
package db

import (
    "fmt"
    "gpt-load/internal/models"
    "log"
    "os"
    "time"

    "gorm.io/driver/mysql"
    "gorm.io/gorm"
    "gorm.io/gorm/logger"
)

var DB *gorm.DB

func InitDB() (*gorm.DB, error) {
    // TODO: read the DSN from the configuration center instead of hard-coding it
    dsn := "root:1236@tcp(127.0.0.1:3306)/gpt_load?charset=utf8mb4&parseTime=True&loc=Local"

    newLogger := logger.New(
        log.New(os.Stdout, "\r\n", log.LstdFlags), // io writer
        logger.Config{
            SlowThreshold:             time.Second, // Slow SQL threshold
            LogLevel:                  logger.Info, // Log level
            IgnoreRecordNotFoundError: true,        // Ignore ErrRecordNotFound error for logger
            Colorful:                  true,        // Enable colored output
        },
    )

    var err error
    DB, err = gorm.Open(mysql.Open(dsn), &gorm.Config{
        Logger: newLogger,
    })
    if err != nil {
        return nil, fmt.Errorf("failed to connect to database: %w", err)
    }

    sqlDB, err := DB.DB()
    if err != nil {
        return nil, fmt.Errorf("failed to get sql.DB: %w", err)
    }

    // Set connection pool parameters
    sqlDB.SetMaxIdleConns(10)
    sqlDB.SetMaxOpenConns(100)
    sqlDB.SetConnMaxLifetime(time.Hour)

    // Auto-migrate models
    err = DB.AutoMigrate(
        &models.SystemSetting{},
        &models.Group{},
        &models.APIKey{},
        &models.RequestLog{},
    )
    if err != nil {
        return nil, fmt.Errorf("failed to auto-migrate database: %w", err)
    }

    fmt.Println("Database connection initialized and models migrated.")
    return DB, nil
}
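One possible way to address the TODO above is to read the DSN from the environment before falling back to the hard-coded development default. A minimal sketch; the DATABASE_DSN variable name is an assumption, not part of this commit.

package db

import "os"

// resolveDSN is a hypothetical helper: it prefers the DATABASE_DSN environment
// variable and only falls back to the local development default when it is unset.
func resolveDSN() string {
    if dsn := os.Getenv("DATABASE_DSN"); dsn != "" {
        return dsn
    }
    return "root:1236@tcp(127.0.0.1:3306)/gpt_load?charset=utf8mb4&parseTime=True&loc=Local"
}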
62
internal/handler/dashboard_handler.go
Normal file
@@ -0,0 +1,62 @@
package handler

import (
    "net/http"

    "github.com/gin-gonic/gin"
    "gpt-load/internal/db"
    "gpt-load/internal/models"
    "gpt-load/internal/response"
)

// GetDashboardStats godoc
// @Summary Get dashboard statistics
// @Description Get statistics for the dashboard, including total requests, success rate, and group distribution.
// @Tags Dashboard
// @Accept json
// @Produce json
// @Success 200 {object} models.DashboardStats
// @Router /api/dashboard/stats [get]
func GetDashboardStats(c *gin.Context) {
    var totalRequests, successRequests int64
    var groupStats []models.GroupRequestStat

    // Get total requests
    if err := db.DB.Model(&models.RequestLog{}).Count(&totalRequests).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to get total requests")
        return
    }

    // Get success requests (status code 2xx)
    if err := db.DB.Model(&models.RequestLog{}).Where("status_code >= ? AND status_code < ?", 200, 300).Count(&successRequests).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to get success requests")
        return
    }

    // Calculate success rate
    var successRate float64
    if totalRequests > 0 {
        successRate = float64(successRequests) / float64(totalRequests)
    }

    // Get group stats
    err := db.DB.Table("request_logs").
        Select("groups.name as group_name, count(request_logs.id) as request_count").
        Joins("join groups on groups.id = request_logs.group_id").
        Group("groups.name").
        Order("request_count desc").
        Scan(&groupStats).Error
    if err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to get group stats")
        return
    }

    stats := models.DashboardStats{
        TotalRequests:   totalRequests,
        SuccessRequests: successRequests,
        SuccessRate:     successRate,
        GroupStats:      groupStats,
    }

    response.Success(c, stats)
}
125
internal/handler/group_handler.go
Normal file
@@ -0,0 +1,125 @@
// Package handler provides HTTP handlers for the application
package handler

import (
    "gpt-load/internal/models"
    "gpt-load/internal/response"
    "net/http"
    "strconv"

    "github.com/gin-gonic/gin"
)

// CreateGroup handles the creation of a new group.
func (s *Server) CreateGroup(c *gin.Context) {
    var group models.Group
    if err := c.ShouldBindJSON(&group); err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid request body")
        return
    }

    if err := s.DB.Create(&group).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to create group")
        return
    }

    response.Success(c, group)
}

// ListGroups handles listing all groups.
func (s *Server) ListGroups(c *gin.Context) {
    var groups []models.Group
    if err := s.DB.Find(&groups).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to list groups")
        return
    }
    response.Success(c, groups)
}

// GetGroup handles getting a single group by its ID.
func (s *Server) GetGroup(c *gin.Context) {
    id, err := strconv.Atoi(c.Param("id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid group ID")
        return
    }

    var group models.Group
    if err := s.DB.Preload("APIKeys").First(&group, id).Error; err != nil {
        response.Error(c, http.StatusNotFound, "Group not found")
        return
    }

    response.Success(c, group)
}

// UpdateGroup handles updating an existing group.
func (s *Server) UpdateGroup(c *gin.Context) {
    id, err := strconv.Atoi(c.Param("id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid group ID")
        return
    }

    var group models.Group
    if err := s.DB.First(&group, id).Error; err != nil {
        response.Error(c, http.StatusNotFound, "Group not found")
        return
    }

    var updateData models.Group
    if err := c.ShouldBindJSON(&updateData); err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid request body")
        return
    }

    // We only allow updating certain fields
    group.Name = updateData.Name
    group.Description = updateData.Description
    group.ChannelType = updateData.ChannelType
    group.Config = updateData.Config

    if err := s.DB.Save(&group).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to update group")
        return
    }

    response.Success(c, group)
}

// DeleteGroup handles deleting a group.
func (s *Server) DeleteGroup(c *gin.Context) {
    id, err := strconv.Atoi(c.Param("id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid group ID")
        return
    }

    // Use a transaction to ensure atomicity
    tx := s.DB.Begin()
    if tx.Error != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to start transaction")
        return
    }

    // Also delete associated API keys
    if err := tx.Where("group_id = ?", id).Delete(&models.APIKey{}).Error; err != nil {
        tx.Rollback()
        response.Error(c, http.StatusInternalServerError, "Failed to delete associated API keys")
        return
    }

    if err := tx.Delete(&models.Group{}, id).Error; err != nil {
        tx.Rollback()
        response.Error(c, http.StatusInternalServerError, "Failed to delete group")
        return
    }

    if err := tx.Commit().Error; err != nil {
        tx.Rollback()
        response.Error(c, http.StatusInternalServerError, "Failed to commit transaction")
        return
    }

    response.Success(c, gin.H{"message": "Group and associated keys deleted successfully"})
}
@@ -6,35 +6,80 @@ import (
    "runtime"
    "time"

    "gpt-load/internal/models"
    "gpt-load/internal/types"

    "github.com/gin-gonic/gin"
    "github.com/sirupsen/logrus"
    "gorm.io/gorm"
)

// Handler contains dependencies for HTTP handlers
type Handler struct {
    keyManager types.KeyManager
    config     types.ConfigManager
// Server contains dependencies for HTTP handlers
type Server struct {
    DB     *gorm.DB
    config types.ConfigManager
}

// NewHandler creates a new handler instance
func NewHandler(keyManager types.KeyManager, config types.ConfigManager) *Handler {
    return &Handler{
        keyManager: keyManager,
        config:     config,
// NewServer creates a new handler instance
func NewServer(db *gorm.DB, config types.ConfigManager) *Server {
    return &Server{
        DB:     db,
        config: config,
    }
}

// RegisterAPIRoutes registers all API routes under a given router group
func (s *Server) RegisterAPIRoutes(api *gin.RouterGroup) {
    // Group management routes
    groups := api.Group("/groups")
    {
        groups.POST("", s.CreateGroup)
        groups.GET("", s.ListGroups)
        groups.GET("/:id", s.GetGroup)
        groups.PUT("/:id", s.UpdateGroup)
        groups.DELETE("/:id", s.DeleteGroup)

        // Key management routes within a group
        keys := groups.Group("/:id/keys")
        {
            keys.POST("", s.CreateKeysInGroup)
            keys.GET("", s.ListKeysInGroup)
        }
    }

    // Key management routes
    api.PUT("/keys/:key_id", s.UpdateKey)
    api.DELETE("/keys", s.DeleteKeys)

    // Dashboard and logs routes
    dashboard := api.Group("/dashboard")
    {
        dashboard.GET("/stats", GetDashboardStats)
    }

    api.GET("/logs", GetLogs)

    // Settings routes
    settings := api.Group("/settings")
    {
        settings.GET("", GetSettings)
        settings.PUT("", UpdateSettings)
    }

    // Reload route
    api.POST("/reload", s.ReloadConfig)
}

// Health handles health check requests
func (h *Handler) Health(c *gin.Context) {
    stats := h.keyManager.GetStats()
func (s *Server) Health(c *gin.Context) {
    var totalKeys, healthyKeys int64
    s.DB.Model(&models.APIKey{}).Count(&totalKeys)
    s.DB.Model(&models.APIKey{}).Where("status = ?", "active").Count(&healthyKeys)

    status := "healthy"
    httpStatus := http.StatusOK

    // Check if there are any healthy keys
    if stats.HealthyKeys == 0 {
    if healthyKeys == 0 && totalKeys > 0 {
        status = "unhealthy"
        httpStatus = http.StatusServiceUnavailable
    }
@@ -50,15 +95,23 @@ func (h *Handler) Health(c *gin.Context) {
    c.JSON(httpStatus, gin.H{
        "status":    status,
        "timestamp": time.Now().UTC().Format(time.RFC3339),
        "healthy_keys": stats.HealthyKeys,
        "total_keys":   stats.TotalKeys,
        "healthy_keys": healthyKeys,
        "total_keys":   totalKeys,
        "uptime":       uptime,
    })
}

// Stats handles statistics requests
func (h *Handler) Stats(c *gin.Context) {
    stats := h.keyManager.GetStats()
func (s *Server) Stats(c *gin.Context) {
    var totalKeys, healthyKeys, disabledKeys int64
    s.DB.Model(&models.APIKey{}).Count(&totalKeys)
    s.DB.Model(&models.APIKey{}).Where("status = ?", "active").Count(&healthyKeys)
    s.DB.Model(&models.APIKey{}).Where("status != ?", "active").Count(&disabledKeys)

    // TODO: Get request counts from the database
    var successCount, failureCount int64
    s.DB.Model(&models.RequestLog{}).Where("status_code = ?", http.StatusOK).Count(&successCount)
    s.DB.Model(&models.RequestLog{}).Where("status_code != ?", http.StatusOK).Count(&failureCount)

    // Add additional system information
    var m runtime.MemStats
@@ -66,15 +119,14 @@ func (h *Handler) Stats(c *gin.Context) {

    response := gin.H{
        "keys": gin.H{
            "total":         stats.TotalKeys,
            "healthy":       stats.HealthyKeys,
            "blacklisted":   stats.BlacklistedKeys,
            "current_index": stats.CurrentIndex,
            "total":    totalKeys,
            "healthy":  healthyKeys,
            "disabled": disabledKeys,
        },
        "requests": gin.H{
            "success_count": stats.SuccessCount,
            "failure_count": stats.FailureCount,
            "total_count":   stats.SuccessCount + stats.FailureCount,
            "success_count": successCount,
            "failure_count": failureCount,
            "total_count":   successCount + failureCount,
        },
        "memory": gin.H{
            "alloc_mb": bToMb(m.Alloc),
@@ -95,48 +147,9 @@ func (h *Handler) Stats(c *gin.Context) {
    c.JSON(http.StatusOK, response)
}

// Blacklist handles blacklist requests
func (h *Handler) Blacklist(c *gin.Context) {
    blacklist := h.keyManager.GetBlacklist()

    response := gin.H{
        "blacklisted_keys": blacklist,
        "count":            len(blacklist),
        "timestamp":        time.Now().UTC().Format(time.RFC3339),
    }

    c.JSON(http.StatusOK, response)
}

// ResetKeys handles key reset requests
func (h *Handler) ResetKeys(c *gin.Context) {
    // Reset blacklist
    h.keyManager.ResetBlacklist()

    // Reload keys from file
    if err := h.keyManager.LoadKeys(); err != nil {
        logrus.Errorf("Failed to reload keys: %v", err)
        c.JSON(http.StatusInternalServerError, gin.H{
            "error":   "Failed to reload keys",
            "message": err.Error(),
        })
        return
    }

    stats := h.keyManager.GetStats()

    c.JSON(http.StatusOK, gin.H{
        "message":      "Keys reset and reloaded successfully",
        "total_keys":   stats.TotalKeys,
        "healthy_keys": stats.HealthyKeys,
        "timestamp":    time.Now().UTC().Format(time.RFC3339),
    })

    logrus.Info("Keys reset and reloaded successfully")
}

// MethodNotAllowed handles 405 requests
func (h *Handler) MethodNotAllowed(c *gin.Context) {
func (s *Server) MethodNotAllowed(c *gin.Context) {
    c.JSON(http.StatusMethodNotAllowed, gin.H{
        "error": "Method not allowed",
        "path":  c.Request.URL.Path,
@@ -146,7 +159,7 @@ func (h *Handler) MethodNotAllowed(c *gin.Context) {
}

// GetConfig returns configuration information (for debugging)
func (h *Handler) GetConfig(c *gin.Context) {
func (s *Server) GetConfig(c *gin.Context) {
    // Only allow in development mode or with special header
    if c.GetHeader("X-Debug-Config") != "true" {
        c.JSON(http.StatusForbidden, gin.H{
@@ -155,13 +168,13 @@ func (h *Handler) GetConfig(c *gin.Context) {
        return
    }

    serverConfig := h.config.GetServerConfig()
    keysConfig := h.config.GetKeysConfig()
    openaiConfig := h.config.GetOpenAIConfig()
    authConfig := h.config.GetAuthConfig()
    corsConfig := h.config.GetCORSConfig()
    perfConfig := h.config.GetPerformanceConfig()
    logConfig := h.config.GetLogConfig()
    serverConfig := s.config.GetServerConfig()
    keysConfig := s.config.GetKeysConfig()
    openaiConfig := s.config.GetOpenAIConfig()
    authConfig := s.config.GetAuthConfig()
    corsConfig := s.config.GetCORSConfig()
    perfConfig := s.config.GetPerformanceConfig()
    logConfig := s.config.GetLogConfig()

    // Sanitize sensitive information
    sanitizedConfig := gin.H{
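A minimal wiring sketch showing how the pieces introduced in this commit could be assembled in main: initialize the database, build the handler Server, and mount the API and proxy route groups. This is hypothetical and not part of the commit; the gpt-load/internal/config import path, the /api and /proxy prefixes, the log channel size, and the listen address are assumptions.

package main

import (
    "github.com/gin-gonic/gin"

    "gpt-load/internal/config"
    "gpt-load/internal/db"
    "gpt-load/internal/handler"
    "gpt-load/internal/models"
    "gpt-load/internal/proxy"
)

func main() {
    database, err := db.InitDB()
    if err != nil {
        panic(err)
    }

    configManager, err := config.NewManager()
    if err != nil {
        panic(err)
    }

    // Buffered channel for asynchronous request logging (size is an assumption).
    logChan := make(chan models.RequestLog, 1024)

    server := handler.NewServer(database, configManager)
    proxyServer, err := proxy.NewProxyServer(database, logChan)
    if err != nil {
        panic(err)
    }

    router := gin.Default()
    server.RegisterAPIRoutes(router.Group("/api"))
    proxyServer.RegisterProxyRoutes(router.Group("/proxy"))

    if err := router.Run(":8080"); err != nil {
        panic(err)
    }
}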
119
internal/handler/key_handler.go
Normal file
@@ -0,0 +1,119 @@
// Package handler provides HTTP handlers for the application
package handler

import (
    "gpt-load/internal/models"
    "gpt-load/internal/response"
    "net/http"
    "strconv"

    "github.com/gin-gonic/gin"
)

type CreateKeysRequest struct {
    Keys []string `json:"keys" binding:"required"`
}

// CreateKeysInGroup handles creating new keys within a specific group.
func (s *Server) CreateKeysInGroup(c *gin.Context) {
    groupID, err := strconv.Atoi(c.Param("id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid group ID")
        return
    }

    var req CreateKeysRequest
    if err := c.ShouldBindJSON(&req); err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid request body")
        return
    }

    var newKeys []models.APIKey
    for _, keyVal := range req.Keys {
        newKeys = append(newKeys, models.APIKey{
            GroupID:  uint(groupID),
            KeyValue: keyVal,
            Status:   "active",
        })
    }

    if err := s.DB.Create(&newKeys).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to create keys")
        return
    }

    response.Success(c, newKeys)
}

// ListKeysInGroup handles listing all keys within a specific group.
func (s *Server) ListKeysInGroup(c *gin.Context) {
    groupID, err := strconv.Atoi(c.Param("id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid group ID")
        return
    }

    var keys []models.APIKey
    if err := s.DB.Where("group_id = ?", groupID).Find(&keys).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to list keys")
        return
    }

    response.Success(c, keys)
}

// UpdateKey handles updating a specific key.
func (s *Server) UpdateKey(c *gin.Context) {
    keyID, err := strconv.Atoi(c.Param("key_id"))
    if err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid key ID")
        return
    }

    var key models.APIKey
    if err := s.DB.First(&key, keyID).Error; err != nil {
        response.Error(c, http.StatusNotFound, "Key not found")
        return
    }

    var updateData struct {
        Status string `json:"status"`
    }
    if err := c.ShouldBindJSON(&updateData); err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid request body")
        return
    }

    key.Status = updateData.Status
    if err := s.DB.Save(&key).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to update key")
        return
    }

    response.Success(c, key)
}

type DeleteKeysRequest struct {
    KeyIDs []uint `json:"key_ids" binding:"required"`
}

// DeleteKeys handles deleting one or more keys.
func (s *Server) DeleteKeys(c *gin.Context) {
    var req DeleteKeysRequest
    if err := c.ShouldBindJSON(&req); err != nil {
        response.Error(c, http.StatusBadRequest, "Invalid request body")
        return
    }

    if len(req.KeyIDs) == 0 {
        response.Error(c, http.StatusBadRequest, "No key IDs provided")
        return
    }

    if err := s.DB.Delete(&models.APIKey{}, req.KeyIDs).Error; err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to delete keys")
        return
    }

    response.Success(c, gin.H{"message": "Keys deleted successfully"})
}
79
internal/handler/log_handler.go
Normal file
@@ -0,0 +1,79 @@
package handler

import (
    "net/http"
    "strconv"
    "time"

    "github.com/gin-gonic/gin"
    "gpt-load/internal/db"
    "gpt-load/internal/models"
    "gpt-load/internal/response"
)

// GetLogs godoc
// @Summary Get request logs
// @Description Get request logs with pagination and filtering
// @Tags Logs
// @Accept json
// @Produce json
// @Param page query int false "Page number"
// @Param size query int false "Page size"
// @Param group_id query int false "Group ID"
// @Param start_time query string false "Start time (RFC3339)"
// @Param end_time query string false "End time (RFC3339)"
// @Param status_code query int false "Status code"
// @Success 200 {array} models.RequestLog
// @Router /api/logs [get]
func GetLogs(c *gin.Context) {
    page, _ := strconv.Atoi(c.DefaultQuery("page", "1"))
    size, _ := strconv.Atoi(c.DefaultQuery("size", "10"))
    offset := (page - 1) * size

    query := db.DB.Model(&models.RequestLog{})

    if groupIDStr := c.Query("group_id"); groupIDStr != "" {
        groupID, err := strconv.Atoi(groupIDStr)
        if err == nil {
            query = query.Where("group_id = ?", groupID)
        }
    }

    if startTimeStr := c.Query("start_time"); startTimeStr != "" {
        startTime, err := time.Parse(time.RFC3339, startTimeStr)
        if err == nil {
            query = query.Where("timestamp >= ?", startTime)
        }
    }

    if endTimeStr := c.Query("end_time"); endTimeStr != "" {
        endTime, err := time.Parse(time.RFC3339, endTimeStr)
        if err == nil {
            query = query.Where("timestamp <= ?", endTime)
        }
    }

    if statusCodeStr := c.Query("status_code"); statusCodeStr != "" {
        statusCode, err := strconv.Atoi(statusCodeStr)
        if err == nil {
            query = query.Where("status_code = ?", statusCode)
        }
    }

    var logs []models.RequestLog
    var total int64

    query.Count(&total)
    err := query.Order("timestamp desc").Offset(offset).Limit(size).Find(&logs).Error
    if err != nil {
        response.Error(c, http.StatusInternalServerError, "Failed to get logs")
        return
    }

    c.JSON(http.StatusOK, gin.H{
        "total": total,
        "page":  page,
        "size":  size,
        "data":  logs,
    })
}
26
internal/handler/reload_handler.go
Normal file
@@ -0,0 +1,26 @@
package handler

import (
    "gpt-load/internal/response"

    "github.com/gin-gonic/gin"
    "github.com/sirupsen/logrus"
)

// ReloadConfig handles the POST /api/reload request.
// It triggers a configuration reload.
func (s *Server) ReloadConfig(c *gin.Context) {
    if s.config == nil {
        response.InternalError(c, "Configuration manager is not initialized")
        return
    }

    err := s.config.ReloadConfig()
    if err != nil {
        logrus.Errorf("Failed to reload config: %v", err)
        response.InternalError(c, "Failed to reload config")
        return
    }

    response.Success(c, gin.H{"message": "Configuration reloaded successfully"})
}
62
internal/handler/settings_handler.go
Normal file
@@ -0,0 +1,62 @@
package handler

import (
    "gpt-load/internal/db"
    "gpt-load/internal/models"
    "gpt-load/internal/response"

    "github.com/gin-gonic/gin"
    "gorm.io/gorm/clause"
)

// GetSettings handles the GET /api/settings request.
// It retrieves all system settings from the database and returns them as a key-value map.
func GetSettings(c *gin.Context) {
    var settings []models.SystemSetting
    if err := db.DB.Find(&settings).Error; err != nil {
        response.InternalError(c, "Failed to retrieve settings")
        return
    }

    settingsMap := make(map[string]string)
    for _, s := range settings {
        settingsMap[s.SettingKey] = s.SettingValue
    }

    response.Success(c, settingsMap)
}

// UpdateSettings handles the PUT /api/settings request.
// It receives a key-value JSON object and updates or creates settings in the database.
func UpdateSettings(c *gin.Context) {
    var settingsMap map[string]string
    if err := c.ShouldBindJSON(&settingsMap); err != nil {
        response.BadRequest(c, "Invalid request body")
        return
    }

    var settingsToUpdate []models.SystemSetting
    for key, value := range settingsMap {
        settingsToUpdate = append(settingsToUpdate, models.SystemSetting{
            SettingKey:   key,
            SettingValue: value,
        })
    }

    if len(settingsToUpdate) == 0 {
        response.Success(c, nil)
        return
    }

    // Using OnConflict to perform an "upsert" operation.
    // If a setting with the same key exists, it will be updated. Otherwise, a new one will be created.
    if err := db.DB.Clauses(clause.OnConflict{
        Columns:   []clause.Column{{Name: "setting_key"}},
        DoUpdates: clause.AssignmentColumns([]string{"setting_value"}),
    }).Create(&settingsToUpdate).Error; err != nil {
        response.InternalError(c, "Failed to update settings")
        return
    }

    response.Success(c, nil)
}
66
internal/models/types.go
Normal file
@@ -0,0 +1,66 @@
package models

import (
    "time"
)

// SystemSetting maps to the system_settings table
type SystemSetting struct {
    ID           uint      `gorm:"primaryKey;autoIncrement" json:"id"`
    SettingKey   string    `gorm:"type:varchar(255);not null;unique" json:"setting_key"`
    SettingValue string    `gorm:"type:text;not null" json:"setting_value"`
    Description  string    `gorm:"type:varchar(512)" json:"description"`
    CreatedAt    time.Time `json:"created_at"`
    UpdatedAt    time.Time `json:"updated_at"`
}

// Group maps to the groups table
type Group struct {
    ID          uint      `gorm:"primaryKey;autoIncrement" json:"id"`
    Name        string    `gorm:"type:varchar(255);not null;unique" json:"name"`
    Description string    `gorm:"type:varchar(512)" json:"description"`
    ChannelType string    `gorm:"type:varchar(50);not null" json:"channel_type"`
    Config      string    `gorm:"type:json" json:"config"`
    APIKeys     []APIKey  `gorm:"foreignKey:GroupID" json:"api_keys"`
    CreatedAt   time.Time `json:"created_at"`
    UpdatedAt   time.Time `json:"updated_at"`
}

// APIKey maps to the api_keys table
type APIKey struct {
    ID           uint       `gorm:"primaryKey;autoIncrement" json:"id"`
    GroupID      uint       `gorm:"not null" json:"group_id"`
    KeyValue     string     `gorm:"type:varchar(512);not null" json:"key_value"`
    Status       string     `gorm:"type:varchar(50);not null;default:'active'" json:"status"`
    RequestCount int64      `gorm:"not null;default:0" json:"request_count"`
    FailureCount int64      `gorm:"not null;default:0" json:"failure_count"`
    LastUsedAt   *time.Time `json:"last_used_at"`
    CreatedAt    time.Time  `json:"created_at"`
    UpdatedAt    time.Time  `json:"updated_at"`
}

// RequestLog maps to the request_logs table
type RequestLog struct {
    ID                 string    `gorm:"type:varchar(36);primaryKey" json:"id"`
    Timestamp          time.Time `gorm:"type:datetime(3);not null" json:"timestamp"`
    GroupID            uint      `gorm:"not null" json:"group_id"`
    KeyID              uint      `gorm:"not null" json:"key_id"`
    SourceIP           string    `gorm:"type:varchar(45)" json:"source_ip"`
    StatusCode         int       `gorm:"not null" json:"status_code"`
    RequestPath        string    `gorm:"type:varchar(1024)" json:"request_path"`
    RequestBodySnippet string    `gorm:"type:text" json:"request_body_snippet"`
}

// GroupRequestStat holds per-group request statistics
type GroupRequestStat struct {
    GroupName    string `json:"group_name"`
    RequestCount int64  `json:"request_count"`
}

// DashboardStats holds aggregated statistics for the dashboard
type DashboardStats struct {
    TotalRequests   int64              `json:"total_requests"`
    SuccessRequests int64              `json:"success_requests"`
    SuccessRate     float64            `json:"success_rate"`
    GroupStats      []GroupRequestStat `json:"group_stats"`
}
@@ -2,498 +2,118 @@
package proxy

import (
    "bufio"
    "bytes"
    "compress/gzip"
    "context"
    "encoding/json"
    "fmt"
    "io"
    "gpt-load/internal/channel"
    "gpt-load/internal/models"
    "gpt-load/internal/response"
    "net/http"
    "net/url"
    "strings"
    "sync/atomic"
    "sync"
    "time"

    "gpt-load/internal/errors"
    "gpt-load/internal/types"

    "github.com/gin-gonic/gin"
    "github.com/sirupsen/logrus"
    "gorm.io/gorm"
)

// A list of errors that are considered normal during streaming when a client disconnects.
var ignorableStreamErrors = []string{
    "context canceled",
    "connection reset by peer",
    "broken pipe",
    "use of closed network connection",
}

// isIgnorableStreamError checks if the error is a common, non-critical error that can occur
// when a client disconnects during a streaming response.
func isIgnorableStreamError(err error) bool {
    errStr := err.Error()
    for _, ignorableError := range ignorableStreamErrors {
        if strings.Contains(errStr, ignorableError) {
            return true
        }
    }
    return false
}

// ProxyServer represents the proxy server
type ProxyServer struct {
    keyManager     types.KeyManager
    configManager  types.ConfigManager
    httpClient     *http.Client
    streamClient   *http.Client // Dedicated client for streaming
    requestCount   int64
    startTime      time.Time
    DB             *gorm.DB
    groupCounters  sync.Map // For round-robin key selection
    requestLogChan chan models.RequestLog
}

// NewProxyServer creates a new proxy server
func NewProxyServer(keyManager types.KeyManager, configManager types.ConfigManager) (*ProxyServer, error) {
    openaiConfig := configManager.GetOpenAIConfig()
    perfConfig := configManager.GetPerformanceConfig()

    // Create high-performance HTTP client
    transport := &http.Transport{
        MaxIdleConns:          100,
        MaxIdleConnsPerHost:   20,
        MaxConnsPerHost:       0,
        IdleConnTimeout:       time.Duration(openaiConfig.IdleConnTimeout) * time.Second,
        TLSHandshakeTimeout:   15 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
        DisableCompression:    !perfConfig.EnableGzip,
        ForceAttemptHTTP2:     true,
        WriteBufferSize:       32 * 1024,
        ReadBufferSize:        32 * 1024,
    }

    // Create dedicated transport for streaming, optimize TCP parameters
    streamTransport := &http.Transport{
        MaxIdleConns:          200,
        MaxIdleConnsPerHost:   40,
        MaxConnsPerHost:       0,
        IdleConnTimeout:       time.Duration(openaiConfig.IdleConnTimeout) * time.Second,
        TLSHandshakeTimeout:   15 * time.Second,
        ExpectContinueTimeout: 1 * time.Second,
        DisableCompression:    true,
        ForceAttemptHTTP2:     true,
        WriteBufferSize:       0,
        ReadBufferSize:        0,
        ResponseHeaderTimeout: time.Duration(openaiConfig.ResponseTimeout) * time.Second,
    }

    httpClient := &http.Client{
        Transport: transport,
        Timeout:   time.Duration(openaiConfig.RequestTimeout) * time.Second,
    }

    // Streaming client without overall timeout
    streamClient := &http.Client{
        Transport: streamTransport,
    }

func NewProxyServer(db *gorm.DB, requestLogChan chan models.RequestLog) (*ProxyServer, error) {
    return &ProxyServer{
        keyManager:     keyManager,
        configManager:  configManager,
        httpClient:     httpClient,
        streamClient:   streamClient,
        startTime:      time.Now(),
        DB:             db,
        groupCounters:  sync.Map{},
        requestLogChan: requestLogChan,
    }, nil
}

// HandleProxy handles proxy requests
// RegisterProxyRoutes registers the main proxy route under a given router group
func (ps *ProxyServer) RegisterProxyRoutes(proxy *gin.RouterGroup) {
    proxy.Any("/:group_name/*path", ps.HandleProxy)
}

// HandleProxy handles the main proxy logic
func (ps *ProxyServer) HandleProxy(c *gin.Context) {
    startTime := time.Now()
    groupName := c.Param("group_name")

    // Increment request count
    atomic.AddInt64(&ps.requestCount, 1)

    // Cache all request body upfront
    var bodyBytes []byte
    if c.Request.Body != nil {
        var err error
        bodyBytes, err = io.ReadAll(c.Request.Body)
        if err != nil {
            logrus.Errorf("Failed to read request body: %v", err)
            c.JSON(http.StatusBadRequest, gin.H{
                "error": "Failed to read request body",
                "code":  errors.ErrProxyRequest,
            })
            return
        }
    }

    // Determine if this is a streaming request using cached data
    isStreamRequest := ps.isStreamRequest(bodyBytes, c)

    // Execute request with retry
    ps.executeRequestWithRetry(c, startTime, bodyBytes, isStreamRequest, 0, nil)
}

// isStreamRequest determines if this is a streaming request
func (ps *ProxyServer) isStreamRequest(bodyBytes []byte, c *gin.Context) bool {
    // Check for Gemini streaming endpoint
    if strings.HasSuffix(c.Request.URL.Path, ":streamGenerateContent") {
        return true
    }

    // Check Accept header
    if strings.Contains(c.GetHeader("Accept"), "text/event-stream") {
        return true
    }

    // Check URL query parameter
    if c.Query("stream") == "true" {
        return true
    }

    // Check stream parameter in request body
    if len(bodyBytes) > 0 {
        var bodyJSON map[string]interface{}
        if err := json.Unmarshal(bodyBytes, &bodyJSON); err == nil {
            if stream, ok := bodyJSON["stream"].(bool); ok && stream {
                return true
            }
        }
    }

    return false
}

// executeRequestWithRetry executes request with retry logic
func (ps *ProxyServer) executeRequestWithRetry(c *gin.Context, startTime time.Time, bodyBytes []byte, isStreamRequest bool, retryCount int, retryErrors []types.RetryError) {
    keysConfig := ps.configManager.GetKeysConfig()

    if retryCount > keysConfig.MaxRetries {
        logrus.Debugf("Max retries exceeded (%d)", retryCount-1)

        errorResponse := gin.H{
            "error":        "Max retries exceeded",
            "code":         errors.ErrProxyRetryExhausted,
            "retry_count":  retryCount - 1,
            "retry_errors": retryErrors,
            "timestamp":    time.Now().UTC().Format(time.RFC3339),
        }

        statusCode := http.StatusBadGateway
        if len(retryErrors) > 0 && retryErrors[len(retryErrors)-1].StatusCode > 0 {
            statusCode = retryErrors[len(retryErrors)-1].StatusCode
        }

        c.JSON(statusCode, errorResponse)
    // 1. Find the group by name
    var group models.Group
    if err := ps.DB.Preload("APIKeys").Where("name = ?", groupName).First(&group).Error; err != nil {
        response.Error(c, http.StatusNotFound, fmt.Sprintf("Group '%s' not found", groupName))
        return
    }

    // Get key information
    keyInfo, err := ps.keyManager.GetNextKey()
    // 2. Select an available API key from the group
    apiKey, err := ps.selectAPIKey(&group)
    if err != nil {
        logrus.Errorf("Failed to get key: %v", err)
        c.JSON(http.StatusServiceUnavailable, gin.H{
            "error": "No API keys available",
            "code":  errors.ErrNoKeysAvailable,
        })
        response.Error(c, http.StatusServiceUnavailable, err.Error())
        return
    }

    // Set key information to context (for logging)
    c.Set("keyIndex", keyInfo.Index)
    c.Set("keyPreview", keyInfo.Preview)

    // Set retry information to context
    if retryCount > 0 {
        c.Set("retryCount", retryCount)
    }

    // Get a base URL from the config manager (handles round-robin)
    openaiConfig := ps.configManager.GetOpenAIConfig()
    upstreamURL, err := url.Parse(openaiConfig.BaseURL)
    // 3. Get the appropriate channel handler from the factory
    channelHandler, err := channel.GetChannel(&group)
    if err != nil {
        logrus.Errorf("Failed to parse upstream URL: %v", err)
        c.JSON(http.StatusInternalServerError, gin.H{
            "error": "Invalid upstream URL configured",
            "code":  errors.ErrConfigInvalid,
        })
        response.Error(c, http.StatusInternalServerError, fmt.Sprintf("Failed to get channel for group '%s': %v", groupName, err))
        return
    }

    // Build upstream request URL
    targetURL := *upstreamURL
    // Correctly append path instead of replacing it
    if strings.HasSuffix(targetURL.Path, "/") {
        targetURL.Path = targetURL.Path + strings.TrimPrefix(c.Request.URL.Path, "/")
    } else {
        targetURL.Path = targetURL.Path + c.Request.URL.Path
    }
    targetURL.RawQuery = c.Request.URL.RawQuery
    // 4. Forward the request using the channel handler
    channelHandler.Handle(c, apiKey, &group)

    // Use different timeout strategies for streaming and non-streaming requests
    var ctx context.Context
    var cancel context.CancelFunc
    // 5. Log the request asynchronously
    go ps.logRequest(c, &group, apiKey, startTime)
}

    if isStreamRequest {
        // Streaming requests only set response header timeout, no overall timeout
        ctx, cancel = context.WithCancel(c.Request.Context())
    } else {
        // Non-streaming requests use configured timeout from the already fetched config
        timeout := time.Duration(openaiConfig.RequestTimeout) * time.Second
        ctx, cancel = context.WithTimeout(c.Request.Context(), timeout)
    }
    defer cancel()

    // Create request using cached bodyBytes
    req, err := http.NewRequestWithContext(
        ctx,
        c.Request.Method,
        targetURL.String(),
        bytes.NewReader(bodyBytes),
    )
    if err != nil {
        logrus.Errorf("Failed to create upstream request: %v", err)
        c.JSON(http.StatusInternalServerError, gin.H{
            "error": "Failed to create upstream request",
            "code":  errors.ErrProxyRequest,
        })
        return
    }
    req.ContentLength = int64(len(bodyBytes))

    // Copy request headers
    for key, values := range c.Request.Header {
        if key != "Host" {
            for _, value := range values {
                req.Header.Add(key, value)
            }
// selectAPIKey selects an API key from a group using round-robin
func (ps *ProxyServer) selectAPIKey(group *models.Group) (*models.APIKey, error) {
    activeKeys := make([]models.APIKey, 0, len(group.APIKeys))
    for _, key := range group.APIKeys {
        if key.Status == "active" {
            activeKeys = append(activeKeys, key)
        }
    }

    if c.GetHeader("Authorization") != "" {
        req.Header.Set("Authorization", "Bearer "+keyInfo.Key)
        req.Header.Del("X-Goog-Api-Key")
    } else if c.GetHeader("X-Goog-Api-Key") != "" {
        req.Header.Set("X-Goog-Api-Key", keyInfo.Key)
        req.Header.Del("Authorization")
    } else if c.Query("key") != "" {
        q := req.URL.Query()
        q.Set("key", keyInfo.Key)
        req.URL.RawQuery = q.Encode()
    } else {
        c.JSON(http.StatusUnauthorized, gin.H{
            "error": "API key required. Please provide a key in 'Authorization' or 'X-Goog-Api-Key' header.",
            "code":  errors.ErrAuthMissing,
        })
        c.Abort()
        return
    if len(activeKeys) == 0 {
        return nil, fmt.Errorf("no active API keys available in group '%s'", group.Name)
    }

    // Choose appropriate client based on request type
    var client *http.Client
    if isStreamRequest {
        client = ps.streamClient
        // Add header to disable nginx buffering
        req.Header.Set("X-Accel-Buffering", "no")
    } else {
        client = ps.httpClient
    // Get the current counter for the group
    counter, _ := ps.groupCounters.LoadOrStore(group.ID, uint64(0))
    currentCounter := counter.(uint64)

    // Select the key and increment the counter
    selectedKey := activeKeys[int(currentCounter%uint64(len(activeKeys)))]
    ps.groupCounters.Store(group.ID, currentCounter+1)

    return &selectedKey, nil
}

func (ps *ProxyServer) logRequest(c *gin.Context, group *models.Group, key *models.APIKey, startTime time.Time) {
    logEntry := models.RequestLog{
        ID:                 fmt.Sprintf("req_%d", time.Now().UnixNano()),
        Timestamp:          startTime,
        GroupID:            group.ID,
        KeyID:              key.ID,
        SourceIP:           c.ClientIP(),
        StatusCode:         c.Writer.Status(),
        RequestPath:        c.Request.URL.Path,
        RequestBodySnippet: "", // Can be implemented later if needed
    }

    // Send request
    resp, err := client.Do(req)
    if err != nil {
        responseTime := time.Since(startTime)

        // Log failure
        if retryCount > 0 {
            logrus.Warnf("Retry request failed (attempt %d): %v (response time: %v)", retryCount+1, err, responseTime)
        } else {
            logrus.Warnf("Initial request failed: %v (response time: %v)", err, responseTime)
        }

        // Record failure asynchronously
        go ps.keyManager.RecordFailure(keyInfo.Key, err)

        // Record retry error information
        if retryErrors == nil {
            retryErrors = make([]types.RetryError, 0)
        }
        retryErrors = append(retryErrors, types.RetryError{
            StatusCode:   0, // Network error, no HTTP status code
            ErrorMessage: err.Error(),
            KeyIndex:     keyInfo.Index,
            Attempt:      retryCount + 1,
        })

        // Retry
        ps.executeRequestWithRetry(c, startTime, bodyBytes, isStreamRequest, retryCount+1, retryErrors)
        return
    }
    defer resp.Body.Close()

    responseTime := time.Since(startTime)

    // Check if HTTP status code requires retry
    if resp.StatusCode >= 400 {
        // Log failure
        if retryCount > 0 {
            logrus.Debugf("Retry request returned error %d (attempt %d) (response time: %v)", resp.StatusCode, retryCount+1, responseTime)
        } else {
            logrus.Debugf("Initial request returned error %d (response time: %v)", resp.StatusCode, responseTime)
        }

        // Read response body to get error information
        var errorMessage string
        bodyBytes, err := io.ReadAll(resp.Body)
        if err != nil {
            errorMessage = fmt.Sprintf("HTTP %d (failed to read body: %v)", resp.StatusCode, err)
        } else {
            if resp.Header.Get("Content-Encoding") == "gzip" {
                reader, gErr := gzip.NewReader(bytes.NewReader(bodyBytes))
                if gErr != nil {
                    errorMessage = fmt.Sprintf("gzip reader error: %v", gErr)
                } else {
                    uncompressedBytes, rErr := io.ReadAll(reader)
                    reader.Close()
                    if rErr != nil {
                        errorMessage = fmt.Sprintf("gzip read error: %v", rErr)
                    } else {
                        errorMessage = string(uncompressedBytes)
                    }
                }
            } else {
                errorMessage = string(bodyBytes)
            }
        }

        var jsonError struct {
            Error struct {
                Message string `json:"message"`
            } `json:"error"`
        }

        if err := json.Unmarshal([]byte(errorMessage), &jsonError); err == nil && jsonError.Error.Message != "" {
            logrus.Warnf("Http Error: %s", jsonError.Error.Message)
        } else {
            logrus.Warnf("Http Error: %s", errorMessage)
        }

        // Record failure asynchronously
        go ps.keyManager.RecordFailure(keyInfo.Key, fmt.Errorf("HTTP %d", resp.StatusCode))

        // Record retry error information
        if retryErrors == nil {
            retryErrors = make([]types.RetryError, 0)
        }
        retryErrors = append(retryErrors, types.RetryError{
            StatusCode:   resp.StatusCode,
            ErrorMessage: errorMessage,
            KeyIndex:     keyInfo.Index,
            Attempt:      retryCount + 1,
        })

        // Retry
        ps.executeRequestWithRetry(c, startTime, bodyBytes, isStreamRequest, retryCount+1, retryErrors)
        return
    }

    // Success - record success asynchronously
    go ps.keyManager.RecordSuccess(keyInfo.Key)

    // Log final success result
    if retryCount > 0 {
        logrus.Debugf("Request succeeded after %d retries (response time: %v)", retryCount, responseTime)
    } else {
        logrus.Debugf("Request succeeded on first attempt (response time: %v)", responseTime)
    }

    // Copy response headers
    for key, values := range resp.Header {
        for _, value := range values {
            c.Header(key, value)
        }
    }

    // Set status code
    c.Status(resp.StatusCode)

    // Handle streaming and non-streaming responses
    if isStreamRequest {
        ps.handleStreamingResponse(c, resp)
    } else {
        ps.handleNormalResponse(c, resp)
    // Send to the logging channel without blocking
    select {
    case ps.requestLogChan <- logEntry:
    default:
        logrus.Warn("Request log channel is full. Dropping log entry.")
    }
}

var newline = []byte("\n")

// handleStreamingResponse handles streaming responses
func (ps *ProxyServer) handleStreamingResponse(c *gin.Context, resp *http.Response) {
    // Set headers for streaming
    c.Header("Cache-Control", "no-cache")
    c.Header("Connection", "keep-alive")
    c.Header("Content-Type", "text/event-stream")
    c.Header("X-Accel-Buffering", "no")

    flusher, ok := c.Writer.(http.Flusher)
    if !ok {
        logrus.Error("Streaming unsupported")
        c.JSON(http.StatusInternalServerError, gin.H{
            "error": "Streaming unsupported",
            "code":  errors.ErrServerInternal,
        })
        return
    }

    scanner := bufio.NewScanner(resp.Body)
    scanner.Buffer(make([]byte, 0, 64*1024), 1024*1024)

    for scanner.Scan() {
        lineBytes := scanner.Bytes()
        if _, err := c.Writer.Write(lineBytes); err != nil {
            if isIgnorableStreamError(err) {
                logrus.Debugf("Stream closed by client: %v", err)
            } else {
                logrus.Errorf("Failed to write streaming data: %v", err)
            }
            return
        }
        if _, err := c.Writer.Write(newline); err != nil {
            if isIgnorableStreamError(err) {
                logrus.Debugf("Stream closed by client: %v", err)
            } else {
                logrus.Errorf("Failed to write streaming data: %v", err)
            }
            return
        }

        flusher.Flush()
    }

    if err := scanner.Err(); err != nil {
        if isIgnorableStreamError(err) {
            logrus.Debugf("Stream closed by client or network: %v", err)
        } else {
            logrus.Errorf("Error reading streaming response: %v", err)
        }
    }
}

// handleNormalResponse handles normal responses
func (ps *ProxyServer) handleNormalResponse(c *gin.Context, resp *http.Response) {
    // Copy response body
    if _, err := io.Copy(c.Writer, resp.Body); err != nil {
        logrus.Errorf("Failed to copy response body: %v", err)
    }
}

// Close closes the proxy server and cleans up resources
// Close cleans up resources
func (ps *ProxyServer) Close() {
    // Close HTTP clients if needed
    if ps.httpClient != nil {
        ps.httpClient.CloseIdleConnections()
    }
    if ps.streamClient != nil {
        ps.streamClient.CloseIdleConnections()
    }
    // Nothing to close for now
}
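The rewritten proxy only enqueues entries onto requestLogChan; this commit does not include the consumer side. A minimal sketch of one possible consumer that drains the channel and writes logs to the database in batches; the function name, batch size, and flush interval are assumptions, not part of this commit.

package proxy

import (
    "time"

    "github.com/sirupsen/logrus"
    "gorm.io/gorm"

    "gpt-load/internal/models"
)

// StartRequestLogWorker is a hypothetical consumer for requestLogChan: it
// batches log entries and flushes them to the database when the batch is
// full or when the ticker fires, whichever comes first.
func StartRequestLogWorker(db *gorm.DB, logChan <-chan models.RequestLog) {
    const batchSize = 100
    ticker := time.NewTicker(5 * time.Second)
    batch := make([]models.RequestLog, 0, batchSize)

    flush := func() {
        if len(batch) == 0 {
            return
        }
        if err := db.Create(&batch).Error; err != nil {
            logrus.Errorf("Failed to write request logs: %v", err)
        }
        batch = batch[:0]
    }

    go func() {
        defer ticker.Stop()
        for {
            select {
            case entry, ok := <-logChan:
                if !ok {
                    flush()
                    return
                }
                batch = append(batch, entry)
                if len(batch) >= batchSize {
                    flush()
                }
            case <-ticker.C:
                flush()
            }
        }
    }()
}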
48
internal/response/response.go
Normal file
@@ -0,0 +1,48 @@
// Package response provides standardized JSON response helpers.
package response

import (
    "net/http"

    "github.com/gin-gonic/gin"
)

// Response defines the standard JSON response structure.
type Response struct {
    Code    int         `json:"code"`
    Message string      `json:"message"`
    Data    interface{} `json:"data,omitempty"`
}

// Success sends a standardized success response.
func Success(c *gin.Context, data interface{}) {
    c.JSON(http.StatusOK, Response{
        Code:    0,
        Message: "Success",
        Data:    data,
    })
}

// Error sends a standardized error response.
func Error(c *gin.Context, code int, message string) {
    c.JSON(code, Response{
        Code:    code,
        Message: message,
        Data:    nil,
    })
}

// BadRequest sends a 400 Bad Request error response.
func BadRequest(c *gin.Context, message string) {
    Error(c, http.StatusBadRequest, message)
}

// NotFound sends a 404 Not Found error response.
func NotFound(c *gin.Context, message string) {
    Error(c, http.StatusNotFound, message)
}

// InternalError sends a 500 Internal Server Error response.
func InternalError(c *gin.Context, message string) {
    Error(c, http.StatusInternalServerError, message)
}
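A short usage sketch of the helpers above in a gin handler; the Ping handler is hypothetical and not part of this commit.

package handler

import (
    "gpt-load/internal/response"

    "github.com/gin-gonic/gin"
)

// Ping is a hypothetical handler demonstrating the response helpers:
// Success wraps data in {"code": 0, "message": "Success", "data": ...},
// while BadRequest/NotFound/InternalError emit the matching HTTP status.
func Ping(c *gin.Context) {
    name := c.Query("name")
    if name == "" {
        response.BadRequest(c, "missing 'name' query parameter")
        return
    }
    response.Success(c, gin.H{"message": "pong", "name": name})
}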
@@ -18,6 +18,7 @@ type ConfigManager interface {
    GetLogConfig() LogConfig
    Validate() error
    DisplayConfig()
    ReloadConfig() error
}

// KeyManager defines the interface for API key management