Building a conversational AI with memory requires careful context management: the chat API is stateless, so each request must carry the relevant history, trimmed to fit the model's context window. A manager class like the following keeps a per-session history and handles the trimming:
import OpenAI from 'openai';

const openai = new OpenAI(); // reads OPENAI_API_KEY from the environment

class ConversationManager {
  constructor(options = {}) {
    this.maxTokens = options.maxTokens || 4000;
    this.systemPrompt = options.systemPrompt || 'You are a helpful assistant.';
    this.conversations = new Map(); // sessionId -> array of messages
  }

  // Return the message history for a session, creating it on first use.
  getHistory(sessionId) {
    if (!this.conversations.has(sessionId)) {
      this.conversations.set(sessionId, []);
    }
    return this.conversations.get(sessionId);
  }

  async chat(sessionId, userMessage) {
    const history = this.getHistory(sessionId);
    history.push({ role: 'user', content: userMessage });

    // Drop the oldest messages until the history fits the token budget,
    // but never drop the message that was just added.
    while (history.length > 1 && this.estimateTokens(history) > this.maxTokens) {
      history.shift();
    }

    const response = await openai.chat.completions.create({
      model: 'gpt-4',
      messages: [
        { role: 'system', content: this.systemPrompt },
        ...history,
      ],
    });

    const reply = response.choices[0].message.content;
    history.push({ role: 'assistant', content: reply });
    return reply;
  }

  // Rough heuristic: about 4 characters per token for English text.
  estimateTokens(messages) {
    return messages.reduce((sum, m) => sum + Math.ceil(m.content.length / 4), 0);
  }
}
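A minimal usage sketch (the session ID and prompts are illustrative); each session keeps an independent history, so concurrent users never see each other's messages:

const manager = new ConversationManager({
  maxTokens: 4000,
  systemPrompt: 'You are a concise coding assistant.',
});

// Follow-up questions in the same session see the earlier exchange.
const first = await manager.chat('session-a', 'What is a closure in JavaScript?');
const followUp = await manager.chat('session-a', 'Show me a short example.');

One caveat of the trimming strategy: shifting single messages can leave the history starting with an assistant turn. If that matters for your model, trim the oldest user/assistant exchange as a pair instead.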

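The characters-divided-by-four estimate is deliberately crude and undercounts for code and for most non-English text. If tight budgeting matters, a real tokenizer gives exact counts. Here is a sketch using the js-tiktoken package (an assumption: any tokenizer matching the target model works, and this ignores the small per-message overhead of the chat format):

import { encodingForModel } from 'js-tiktoken';

const enc = encodingForModel('gpt-4');

// Exact token count for a message array, summed over message contents.
function countTokens(messages) {
  return messages.reduce((sum, m) => sum + enc.encode(m.content).length, 0);
}

Swapping this in for estimateTokens leaves the trimming loop unchanged.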