Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions backend/pkg/config/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -124,6 +124,11 @@ type Config struct {
KimiServerURL string `env:"KIMI_SERVER_URL" envDefault:"https://api.moonshot.ai/v1"`
KimiProvider string `env:"KIMI_PROVIDER"`

// MiniMax LLM provider
MiniMaxAPIKey string `env:"MINIMAX_API_KEY"`
MiniMaxServerURL string `env:"MINIMAX_SERVER_URL" envDefault:"https://api.minimax.io/v1"`
MiniMaxProvider string `env:"MINIMAX_PROVIDER"`

// Qwen (Tongyi Qianwen) provider
QwenAPIKey string `env:"QWEN_API_KEY"`
QwenServerURL string `env:"QWEN_SERVER_URL" envDefault:"https://dashscope-us.aliyuncs.com/compatible-mode/v1"`
Expand Down
130 changes: 130 additions & 0 deletions backend/pkg/providers/minimax/config.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,130 @@
simple:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 8192
price:
input: 0.40
output: 1.10

simple_json:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 4096
json: true
price:
input: 0.40
output: 1.10

primary_agent:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 16384
price:
input: 0.40
output: 1.10

assistant:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 16384
price:
input: 0.40
output: 1.10

generator:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 32768
price:
input: 0.40
output: 1.10

refiner:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 20480
price:
input: 0.40
output: 1.10

adviser:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 8192
price:
input: 0.40
output: 1.10

reflector:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 4096
price:
input: 0.40
output: 1.10

searcher:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 4096
price:
input: 0.40
output: 1.10

enricher:
model: MiniMax-M2.7
temperature: 0.7
top_p: 0.8
n: 1
max_tokens: 4096
price:
input: 0.40
output: 1.10

coder:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 20480
price:
input: 0.40
output: 1.10

installer:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 16384
price:
input: 0.40
output: 1.10

pentester:
model: MiniMax-M2.7
temperature: 0.5
top_p: 0.5
n: 1
max_tokens: 16384
price:
input: 0.40
output: 1.10
178 changes: 178 additions & 0 deletions backend/pkg/providers/minimax/minimax.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,178 @@
package minimax

import (
"context"
"embed"
"fmt"

"pentagi/pkg/config"
"pentagi/pkg/providers/pconfig"
"pentagi/pkg/providers/provider"
"pentagi/pkg/system"
"pentagi/pkg/templates"

"github.com/vxcontrol/langchaingo/llms"
"github.com/vxcontrol/langchaingo/llms/openai"
"github.com/vxcontrol/langchaingo/llms/streaming"
)

// Embedded provider configuration: per-agent call options (config.yml)
// and the list of supported models with pricing (models.yml).
//go:embed config.yml models.yml
var configFS embed.FS

// MiniMaxAgentModel is the default chat model used when a configuration
// section does not specify one explicitly.
const MiniMaxAgentModel = "MiniMax-M2.7"

// BuildProviderConfig parses raw YAML provider configuration data and
// returns the resulting provider config. Sections that omit a setting
// inherit the MiniMax defaults: the default agent model, a single
// completion (n=1), and a 4000-token output cap.
func BuildProviderConfig(configData []byte) (*pconfig.ProviderConfig, error) {
	fallbackOptions := []llms.CallOption{
		llms.WithModel(MiniMaxAgentModel),
		llms.WithN(1),
		llms.WithMaxTokens(4000),
	}

	return pconfig.LoadConfigData(configData, fallbackOptions)
}

// DefaultProviderConfig builds the provider configuration from the
// embedded config.yml shipped with this package.
func DefaultProviderConfig() (*pconfig.ProviderConfig, error) {
	raw, err := configFS.ReadFile("config.yml")
	if err != nil {
		return nil, err
	}
	return BuildProviderConfig(raw)
}

// DefaultModels returns the models catalog (names, descriptions, prices)
// parsed from the embedded models.yml.
func DefaultModels() (pconfig.ModelsConfig, error) {
	raw, err := configFS.ReadFile("models.yml")
	if err != nil {
		return nil, err
	}
	return pconfig.LoadModelsConfigData(raw)
}

// minimaxProvider implements provider.Provider on top of the MiniMax
// OpenAI-compatible chat completion API.
type minimaxProvider struct {
	llm            *openai.LLM              // OpenAI-compatible client pointed at the MiniMax endpoint
	models         pconfig.ModelsConfig     // supported models catalog loaded from models.yml
	providerConfig *pconfig.ProviderConfig  // per-agent call options loaded from config.yml
	providerPrefix string                   // optional prefix applied by ModelWithPrefix (from MINIMAX_PROVIDER)
}

// New constructs a MiniMax provider from the application configuration.
// It requires MINIMAX_API_KEY to be set and targets the OpenAI-compatible
// endpoint configured via MINIMAX_SERVER_URL.
//
// Errors from dependency setup are wrapped with context so the failing
// step is visible in logs while errors.Is/As still unwrap them.
func New(cfg *config.Config, providerConfig *pconfig.ProviderConfig) (provider.Provider, error) {
	if cfg.MiniMaxAPIKey == "" {
		return nil, fmt.Errorf("missing MINIMAX_API_KEY environment variable")
	}

	// Shared HTTP client honors global proxy/TLS settings from cfg.
	httpClient, err := system.GetHTTPClient(cfg)
	if err != nil {
		return nil, fmt.Errorf("creating HTTP client for MiniMax provider: %w", err)
	}

	models, err := DefaultModels()
	if err != nil {
		return nil, fmt.Errorf("loading MiniMax models config: %w", err)
	}

	// MiniMax exposes an OpenAI-compatible API, so the generic openai
	// client is reused with a custom base URL.
	client, err := openai.New(
		openai.WithToken(cfg.MiniMaxAPIKey),
		openai.WithModel(MiniMaxAgentModel),
		openai.WithBaseURL(cfg.MiniMaxServerURL),
		openai.WithHTTPClient(httpClient),
	)
	if err != nil {
		return nil, fmt.Errorf("creating MiniMax client: %w", err)
	}

	return &minimaxProvider{
		llm:            client,
		models:         models,
		providerConfig: providerConfig,
		providerPrefix: cfg.MiniMaxProvider,
	}, nil
}

// Type reports the provider type identifier for MiniMax.
func (p *minimaxProvider) Type() provider.ProviderType {
	return provider.ProviderMiniMax
}

// GetRawConfig returns the raw YAML bytes backing the provider configuration.
func (p *minimaxProvider) GetRawConfig() []byte {
	return p.providerConfig.GetRawConfig()
}

// GetProviderConfig returns the parsed provider configuration.
func (p *minimaxProvider) GetProviderConfig() *pconfig.ProviderConfig {
	return p.providerConfig
}

// GetPriceInfo returns the input/output token pricing configured for the
// given agent options type, or nil if none is configured.
func (p *minimaxProvider) GetPriceInfo(opt pconfig.ProviderOptionsType) *pconfig.PriceInfo {
	return p.providerConfig.GetPriceInfoForType(opt)
}

// GetModels returns the catalog of supported MiniMax models.
func (p *minimaxProvider) GetModels() pconfig.ModelsConfig {
	return p.models
}

// Model resolves the model name configured for the given options type by
// applying that type's call options on top of the package default, so a
// section without an explicit model falls back to MiniMaxAgentModel.
func (p *minimaxProvider) Model(opt pconfig.ProviderOptionsType) string {
	fallback := MiniMaxAgentModel
	resolved := llms.CallOptions{Model: &fallback}
	for _, apply := range p.providerConfig.GetOptionsForType(opt) {
		apply(&resolved)
	}
	return resolved.GetModel()
}

// ModelWithPrefix returns the resolved model name with the configured
// provider prefix (MINIMAX_PROVIDER) applied, if any.
func (p *minimaxProvider) ModelWithPrefix(opt pconfig.ProviderOptionsType) string {
	return provider.ApplyModelPrefix(p.Model(opt), p.providerPrefix)
}

// Call sends a single text prompt using the call options configured for
// the given agent options type and returns the completion text.
func (p *minimaxProvider) Call(
	ctx context.Context,
	opt pconfig.ProviderOptionsType,
	prompt string,
) (string, error) {
	return provider.WrapGenerateFromSinglePrompt(
		ctx, p, opt, p.llm, prompt,
		p.providerConfig.GetOptionsForType(opt)...,
	)
}

// CallEx generates a completion for a full message chain, streaming
// partial output through streamCb. Per-type options are appended after
// the streaming option, so the configured options take effect.
func (p *minimaxProvider) CallEx(
	ctx context.Context,
	opt pconfig.ProviderOptionsType,
	chain []llms.MessageContent,
	streamCb streaming.Callback,
) (*llms.ContentResponse, error) {
	return provider.WrapGenerateContent(
		ctx, p, opt, p.llm.GenerateContent, chain,
		append([]llms.CallOption{
			llms.WithStreamingFunc(streamCb),
		}, p.providerConfig.GetOptionsForType(opt)...)...,
	)
}

// CallWithTools is like CallEx but additionally exposes the given tool
// definitions to the model, enabling tool/function calling.
func (p *minimaxProvider) CallWithTools(
	ctx context.Context,
	opt pconfig.ProviderOptionsType,
	chain []llms.MessageContent,
	tools []llms.Tool,
	streamCb streaming.Callback,
) (*llms.ContentResponse, error) {
	return provider.WrapGenerateContent(
		ctx, p, opt, p.llm.GenerateContent, chain,
		append([]llms.CallOption{
			llms.WithTools(tools),
			llms.WithStreamingFunc(streamCb),
		}, p.providerConfig.GetOptionsForType(opt)...)...,
	)
}

// GetUsage extracts token-usage accounting from a response info map.
func (p *minimaxProvider) GetUsage(info map[string]any) pconfig.CallUsage {
	return pconfig.NewCallUsage(info)
}

// GetToolCallIDTemplate probes the provider (using the "simple" options
// type) to determine the tool-call ID format this backend emits.
func (p *minimaxProvider) GetToolCallIDTemplate(ctx context.Context, prompter templates.Prompter) (string, error) {
	return provider.DetermineToolCallIDTemplate(ctx, p, pconfig.OptionsTypeSimple, prompter)
}
27 changes: 27 additions & 0 deletions backend/pkg/providers/minimax/models.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,27 @@
- name: MiniMax-M2.7
description: MiniMax-M2.7 - Latest flagship model with enhanced reasoning and coding capabilities. Supports tool calling, JSON output, and streaming.
thinking: false
price:
input: 0.40
output: 1.10

- name: MiniMax-M2.7-highspeed
description: MiniMax-M2.7-highspeed - High-speed version of M2.7 for low-latency scenarios. Same capabilities with improved response latency.
thinking: false
price:
input: 0.40
output: 1.10

- name: MiniMax-M2.5
description: MiniMax-M2.5 - High-performance large language model with 204K context window. Supports tool calling, JSON output, and streaming. Suitable for general dialogue, code generation, and complex reasoning tasks.
thinking: false
price:
input: 0.40
output: 1.10

- name: MiniMax-M2.5-highspeed
description: MiniMax-M2.5-highspeed - Optimized version of MiniMax-M2.5 for faster inference with 204K context window. Same capabilities as M2.5 with improved response latency.
thinking: false
price:
input: 0.40
output: 1.10
2 changes: 2 additions & 0 deletions backend/pkg/providers/provider/provider.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,6 +29,7 @@ const (
ProviderDeepSeek ProviderType = "deepseek"
ProviderGLM ProviderType = "glm"
ProviderKimi ProviderType = "kimi"
ProviderMiniMax ProviderType = "minimax"
ProviderQwen ProviderType = "qwen"
)

Expand All @@ -48,6 +49,7 @@ const (
DefaultProviderNameDeepSeek ProviderName = ProviderName(ProviderDeepSeek)
DefaultProviderNameGLM ProviderName = ProviderName(ProviderGLM)
DefaultProviderNameKimi ProviderName = ProviderName(ProviderKimi)
DefaultProviderNameMiniMax ProviderName = ProviderName(ProviderMiniMax)
DefaultProviderNameQwen ProviderName = ProviderName(ProviderQwen)
)

Expand Down
Loading