Compare commits

..

10 Commits

Author SHA1 Message Date
shokollm
4fa9b0456a fix: add fallback UUID generator for crypto.randomUUID compatibility
crypto.randomUUID() is not available in all environments (e.g., older browsers,
non-secure contexts). Added a fallback UUID v4 implementation.
2026-04-10 04:19:45 +00:00
shokollm
b3ab004447 fix: add timeout for chat requests and improve error handling
Changes:
1. Add 30-second timeout for chat API requests using AbortController
2. User's message now shows immediately before API response (already done in previous PR)
3. Differentiate between timeout errors and other errors in error messages
4. API client now accepts optional signal parameter for abort support
2026-04-10 04:09:30 +00:00
d394bc0857 Merge pull request 'fix: display user messages in chat interface' (#48) from fix/display-user-messages into main 2026-04-10 06:04:23 +02:00
shokollm
dfa806ab53 fix: add user's message to frontend chat store when sending
Previously, only the assistant's response was added to the frontend store.
Now both user and assistant messages are stored, so the conversation
displays correctly in the chat interface.
2026-04-10 04:00:51 +00:00
3493775b7f Merge pull request 'fix: update MiniMaxConnector default model to MiniMax-M2.7' (#47) from fix/minimax-connector-model into main 2026-04-10 06:00:36 +02:00
shokollm
82645dfb3b fix: update MiniMaxConnector default model to MiniMax-M2.7 2026-04-10 03:53:26 +00:00
c17fa243a1 Merge pull request 'fix: use MiniMax text/chatcompletion_v2 endpoint' (#46) from fix/minimax-endpoint-v2 into main 2026-04-10 05:44:25 +02:00
shokollm
a55ed9cc04 fix: use MiniMax text/chatcompletion_v2 endpoint instead of chat/completions
The /v1/chat/completions endpoint returns 529 (overloaded) while
/v1/text/chatcompletion_v2 works reliably.
2026-04-10 03:42:20 +00:00
d1408b74b4 Merge pull request 'fix: properly configure MiniMax API endpoint and CrewAI LLM' (#45) from fix/minimax-api-endpoint-v2 into main 2026-04-10 05:31:13 +02:00
shokollm
4197475eed fix: properly configure CrewAI LLM with MiniMax api_base
- Use CrewAI's LLM class directly with api_base parameter instead of custom subclass
- Remove broken MiniMaxLLM inheritance from LLM
- Update agent creation to use LLM(model, api_key, api_base) pattern

The issue was that inheriting from CrewAI's LLM class caused the api_base
to be set to None. Now we use CrewAI's LLM directly with the correct parameters.

Fixes #43
2026-04-10 03:19:51 +00:00
5 changed files with 40 additions and 15 deletions

View File

@@ -1,6 +1,6 @@
 from typing import List, Optional, Dict, Any
-from crewai import Agent, Task, Crew
+from crewai import Agent, Task, Crew, LLM
-from .llm_connector import MiniMaxConnector, MiniMaxLLM
+from .llm_connector import MiniMaxConnector
 from ...core.config import get_settings
@@ -141,7 +141,7 @@ def create_trading_designer_agent(
         role="Trading Strategy Designer",
         goal="Convert natural language trading requests into precise strategy configurations",
         backstory=system_prompt,
-        llm=MiniMaxLLM(api_key=api_key, model=model),
+        llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
         verbose=True,
     )
@@ -155,7 +155,7 @@ def create_strategy_validator_agent(
         backstory="""You are a meticulous strategy validator with expertise in trading systems.
         You check that all required parameters are present, values are reasonable, and the
         strategy makes logical sense. You never approve strategies with missing or invalid data.""",
-        llm=MiniMaxLLM(api_key=api_key, model=model),
+        llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
         verbose=True,
     )
@@ -169,7 +169,7 @@ def create_strategy_explainer_agent(
         backstory="""You are a patient trading strategy explainer. You translate complex
         strategy configurations into easy-to-understand language. You help users understand
         exactly what their strategies will do when triggered.""",
-        llm=MiniMaxLLM(api_key=api_key, model=model),
+        llm=LLM(model=model, api_key=api_key, api_base="https://api.minimax.io/v1"),
         verbose=True,
     )

View File

@@ -1,11 +1,9 @@
 from typing import Optional, List, Dict, Any
 import httpx
-from crewai import LLM
-class MiniMaxLLM(LLM):
+class MiniMaxLLM:
     def __init__(self, api_key: str, model: str = "MiniMax-M2.7", **kwargs):
-        super().__init__(**kwargs)
         self.api_key = api_key
         self.model = model
         self.base_url = "https://api.minimax.io/v1"
@@ -23,7 +21,7 @@ class MiniMaxLLM(LLM):
         }
         with httpx.Client(timeout=60.0) as client:
             response = client.post(
-                f"{self.base_url}/chat/completions",
+                f"{self.base_url}/text/chatcompletion_v2",
                 headers=headers,
                 json=payload,
             )
@@ -35,7 +33,7 @@
 class MiniMaxConnector:
-    def __init__(self, api_key: str, model: str = "MiniMax-Text-01"):
+    def __init__(self, api_key: str, model: str = "MiniMax-M2.7"):
         self.api_key = api_key
         self.model = model

View File

@@ -104,11 +104,12 @@ export const api = {
       }
     },
-    async chat(id: string, message: string): Promise<BotChatResponse> {
+    async chat(id: string, message: string, signal?: AbortSignal): Promise<BotChatResponse> {
       const response = await fetch(`${API_URL}/bots/${id}/chat`, {
         method: 'POST',
         headers: getAuthHeaders(),
-        body: JSON.stringify({ message } as BotChatRequest)
+        body: JSON.stringify({ message } as BotChatRequest),
+        signal
       });
       return handleResponse<BotChatResponse>(response);
     },

View File

@@ -8,12 +8,25 @@ export interface ChatMessage {
   timestamp: Date;
 }
+// Fallback UUID generator for environments where crypto.randomUUID is not available
+function generateId(): string {
+  if (typeof crypto !== 'undefined' && typeof crypto.randomUUID === 'function') {
+    return crypto.randomUUID();
+  }
+  // Fallback: simple UUID v4 implementation
+  return 'xxxxxxxx-xxxx-4xxx-yxxx-xxxxxxxxxxxx'.replace(/[xy]/g, (c) => {
+    const r = (Math.random() * 16) | 0;
+    const v = c === 'x' ? r : (r & 0x3) | 0x8;
+    return v.toString(16);
+  });
+}
 export const chatStore = writable<ChatMessage[]>([]);
 export function addMessage(message: Omit<ChatMessage, 'id' | 'timestamp'>) {
   const newMessage: ChatMessage = {
     ...message,
-    id: crypto.randomUUID(),
+    id: generateId(),
     timestamp: new Date()
   };
   chatStore.update(messages => [...messages, newMessage]);

View File

@@ -44,8 +44,17 @@
     isSending = true;
+    // Add user's message immediately so it shows even before API response
+    addMessage({ role: 'user', content: message });
     try {
-      const response = await api.bots.chat(botId, message);
+      // Add timeout to prevent hanging requests
+      const controller = new AbortController();
+      const timeoutId = setTimeout(() => controller.abort(), 30000);
+      const response = await api.bots.chat(botId, message, controller.signal);
+      clearTimeout(timeoutId);
       addMessage({ role: 'assistant', content: response.response });
       if (response.strategy_config) {
@@ -53,7 +62,11 @@
         setCurrentBot(bot);
       }
     } catch (e) {
-      addMessage({ role: 'assistant', content: 'Sorry, I encountered an error. Please try again.' });
+      if (e instanceof Error && e.name === 'AbortError') {
+        addMessage({ role: 'assistant', content: 'Request timed out. Please try again.' });
+      } else {
+        addMessage({ role: 'assistant', content: 'Sorry, I encountered an error. Please try again.' });
+      }
     } finally {
       isSending = false;
     }