Compare commits

...

13 Commits

Author SHA1 Message Date
shokollm
b3ab004447 fix: add timeout for chat requests and improve error handling
Changes:
1. Add 30-second timeout for chat API requests using AbortController
2. User's message now shows immediately before API response (already done in previous PR)
3. Differentiate between timeout errors and other errors in error messages
4. API client now accepts optional signal parameter for abort support
2026-04-10 04:09:30 +00:00
d394bc0857 Merge pull request 'fix: display user messages in chat interface' (#48) from fix/display-user-messages into main 2026-04-10 06:04:23 +02:00
shokollm
dfa806ab53 fix: add user's message to frontend chat store when sending
Previously, only the assistant's response was added to the frontend store.
Now both user and assistant messages are stored, so the conversation
displays correctly in the chat interface.
2026-04-10 04:00:51 +00:00
3493775b7f Merge pull request 'fix: update MiniMaxConnector default model to MiniMax-M2.7' (#47) from fix/minimax-connector-model into main 2026-04-10 06:00:36 +02:00
shokollm
82645dfb3b fix: update MiniMaxConnector default model to MiniMax-M2.7 2026-04-10 03:53:26 +00:00
c17fa243a1 Merge pull request 'fix: use MiniMax text/chatcompletion_v2 endpoint' (#46) from fix/minimax-endpoint-v2 into main 2026-04-10 05:44:25 +02:00
shokollm
a55ed9cc04 fix: use MiniMax text/chatcompletion_v2 endpoint instead of chat/completions
The /v1/chat/completions endpoint returns 529 (overloaded) while
/v1/text/chatcompletion_v2 works reliably.
2026-04-10 03:42:20 +00:00
d1408b74b4 Merge pull request 'fix: properly configure MiniMax API endpoint and CrewAI LLM' (#45) from fix/minimax-api-endpoint-v2 into main 2026-04-10 05:31:13 +02:00
shokollm
4197475eed fix: properly configure CrewAI LLM with MiniMax api_base
- Use CrewAI's LLM class directly with api_base parameter instead of custom subclass
- Remove broken MiniMaxLLM inheritance from LLM
- Update agent creation to use LLM(model, api_key, api_base) pattern

The issue was that inheriting from CrewAI's LLM class caused the api_base
to be set to None. Now we use CrewAI's LLM directly with the correct parameters.

Fixes #43
2026-04-10 03:19:51 +00:00
87bac8894a Merge pull request 'fix: update MiniMax API endpoint to api.minimax.io' (#44) from fix/minimax-api-endpoint into main 2026-04-10 05:10:17 +02:00
shokollm
bef4479675 fix: update MiniMax API endpoint and default model
Changes:
1. Updated API endpoint from api.minimax.chat to api.minimax.io
2. Changed default model from MiniMax-Text-01 to MiniMax-M2.7
   (MiniMax-Text-01 is not available for all API key plans)
3. Updated .env.example with correct default model

MiniMax API docs: https://platform.minimax.io/docs/api-reference/text-anthropic-api

Fixes #43
2026-04-10 03:07:02 +00:00
75970c57e3 Merge pull request 'feat: return access token on user registration' (#42) from feat/41-return-token-on-register into main 2026-04-10 03:31:15 +02:00
shokollm
f23044465a feat: return access token on user registration
After successful registration, the backend now returns an access token
(along with token_type) so the frontend can:
- Store the token in localStorage
- Fetch the user profile
- Redirect to dashboard

Fixes #41
2026-04-10 01:28:01 +00:00
6 changed files with 38 additions and 23 deletions

View File

@@ -32,7 +32,7 @@ MINIMAX_API_KEY=your-minimax-api-key
# MiniMax model to use
# Common options: MiniMax-Text-01, MiniMax-M2.1, MiniMax-M2.7
MINIMAX_MODEL=MiniMax-Text-01
MINIMAX_MODEL=MiniMax-M2.7
# =============================================================================
# AVE CLOUD API

View File

@@ -58,7 +58,7 @@ def get_current_user(
@router.post(
"/register", response_model=UserResponse, status_code=status.HTTP_201_CREATED
"/register", response_model=Token, status_code=status.HTTP_201_CREATED
)
def register(user: UserCreate, db: Session = Depends(get_db)):
existing_user = db.query(User).filter(User.email == user.email).first()
@@ -75,7 +75,10 @@ def register(user: UserCreate, db: Session = Depends(get_db)):
db.add(db_user)
db.commit()
db.refresh(db_user)
return db_user
# Generate and return access token so frontend can proceed immediately
access_token = create_access_token(data={"sub": db_user.id})
return Token(access_token=access_token, token_type="bearer")
@router.post("/login", response_model=Token)

View File

@@ -1,6 +1,6 @@
from typing import List, Optional, Dict, Any
from crewai import Agent, Task, Crew
from .llm_connector import MiniMaxConnector, MiniMaxLLM
from crewai import Agent, Task, Crew, LLM
from .llm_connector import MiniMaxConnector
from ...core.config import get_settings
@@ -120,7 +120,7 @@ class StrategyExplainer:
def create_trading_designer_agent(
api_key: str, model: str = "MiniMax-Text-01"
api_key: str, model: str = "MiniMax-M2.7"
) -> Agent:
connector = MiniMaxConnector(api_key=api_key, model=model)
@@ -141,13 +141,13 @@ def create_trading_designer_agent(
role="Trading Strategy Designer",
goal="Convert natural language trading requests into precise strategy configurations",
backstory=system_prompt,
llm=MiniMaxLLM(api_key=api_key, model=model),
llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
verbose=True,
)
def create_strategy_validator_agent(
api_key: str, model: str = "MiniMax-Text-01"
api_key: str, model: str = "MiniMax-M2.7"
) -> Agent:
return Agent(
role="Strategy Validator",
@@ -155,13 +155,13 @@ def create_strategy_validator_agent(
backstory="""You are a meticulous strategy validator with expertise in trading systems.
You check that all required parameters are present, values are reasonable, and the
strategy makes logical sense. You never approve strategies with missing or invalid data.""",
llm=MiniMaxLLM(api_key=api_key, model=model),
llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
verbose=True,
)
def create_strategy_explainer_agent(
api_key: str, model: str = "MiniMax-Text-01"
api_key: str, model: str = "MiniMax-M2.7"
) -> Agent:
return Agent(
role="Strategy Explainer",
@@ -169,13 +169,13 @@ def create_strategy_explainer_agent(
backstory="""You are a patient trading strategy explainer. You translate complex
strategy configurations into easy-to-understand language. You help users understand
exactly what their strategies will do when triggered.""",
llm=MiniMaxLLM(api_key=api_key, model=model),
llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
verbose=True,
)
class TradingCrew:
def __init__(self, api_key: str, model: str = "MiniMax-Text-01"):
def __init__(self, api_key: str, model: str = "MiniMax-M2.7"):
self.api_key = api_key
self.model = model
self.validator = StrategyValidator()

View File

@@ -1,14 +1,12 @@
from typing import Optional, List, Dict, Any
import httpx
from crewai import LLM
class MiniMaxLLM(LLM):
def __init__(self, api_key: str, model: str = "MiniMax-Text-01", **kwargs):
super().__init__(**kwargs)
class MiniMaxLLM:
def __init__(self, api_key: str, model: str = "MiniMax-M2.7", **kwargs):
self.api_key = api_key
self.model = model
self.base_url = "https://api.minimax.chat/v1"
self.base_url = "https://api.minimax.io/v1"
def _call(self, messages: List[Dict[str, str]], **kwargs) -> str:
headers = {
@@ -23,7 +21,7 @@ class MiniMaxLLM(LLM):
}
with httpx.Client(timeout=60.0) as client:
response = client.post(
f"{self.base_url}/chat/completions",
f"{self.base_url}/text/chatcompletion_v2",
headers=headers,
json=payload,
)
@@ -35,7 +33,7 @@ class MiniMaxLLM(LLM):
class MiniMaxConnector:
def __init__(self, api_key: str, model: str = "MiniMax-Text-01"):
def __init__(self, api_key: str, model: str = "MiniMax-M2.7"):
self.api_key = api_key
self.model = model

View File

@@ -104,11 +104,12 @@ export const api = {
}
},
async chat(id: string, message: string): Promise<BotChatResponse> {
async chat(id: string, message: string, signal?: AbortSignal): Promise<BotChatResponse> {
const response = await fetch(`${API_URL}/bots/${id}/chat`, {
method: 'POST',
headers: getAuthHeaders(),
body: JSON.stringify({ message } as BotChatRequest)
body: JSON.stringify({ message } as BotChatRequest),
signal
});
return handleResponse<BotChatResponse>(response);
},

View File

@@ -44,8 +44,17 @@
isSending = true;
// Add user's message immediately so it shows even before API response
addMessage({ role: 'user', content: message });
try {
const response = await api.bots.chat(botId, message);
// Add timeout to prevent hanging requests
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 30000);
const response = await api.bots.chat(botId, message, controller.signal);
clearTimeout(timeoutId);
addMessage({ role: 'assistant', content: response.response });
if (response.strategy_config) {
@@ -53,7 +62,11 @@
setCurrentBot(bot);
}
} catch (e) {
addMessage({ role: 'assistant', content: 'Sorry, I encountered an error. Please try again.' });
if (e instanceof Error && e.name === 'AbortError') {
addMessage({ role: 'assistant', content: 'Request timed out. Please try again.' });
} else {
addMessage({ role: 'assistant', content: 'Sorry, I encountered an error. Please try again.' });
}
} finally {
isSending = false;
}