// Name: Barbarian Chat
// Description: Chat with an AI Barbarian
// Author: Bard
import '@johnlindquist/kit'
import { trimMessages } from '@langchain/core/messages'
import { ChatPromptTemplate, MessagesPlaceholder } from '@langchain/core/prompts'
import { END, MemorySaver, MessagesAnnotation, START, StateGraph } from '@langchain/langgraph'
import { ChatOpenAI } from '@langchain/openai'
let id: NodeJS.Timeout | number = -1;
let currentMessage: string = ``;
const llm = new ChatOpenAI({
modelName: 'gpt-4o-mini',
apiKey: await env('OPENAI_API_KEY'),
temperature: 0.7,
streaming: true,
callbacks: [
{
handleLLMStart: async () => {
id = setTimeout(() => {
let dots = 0
id = setInterval(() => {
const loadingDots = '.'.repeat(dots + 1)
chat.setMessage(-1, md(`### Barbarian Thinking${loadingDots}`))
dots = (dots + 1) % 3
}, 500)
}, 1000)
currentMessage = `` chat.addMessage('')
},
handleLLMNewToken: async (token: string) => {
clearInterval(id)
setLoading(false)
if (!token) return
currentMessage += token
let htmlMessage = md(currentMessage)
chat.setMessage(-1, htmlMessage)
},
handleLLMError: async (err: Error) => {
clearInterval(id)
warn(`error`, JSON.stringify(err))
running = false
},
handleLLMEnd: async () => { clearInterval(id)
running = false
},
},
],
})
// The barbarian persona used as the system message for every turn.
const barbarianPersona = `You are a barbarian. Speak in short, direct sentences. Use simple words. Show anger and impatience. You are strong and do not like questions.`

// Prompt: persona first, then the (trimmed) conversation history is spliced
// in via the 'messages' placeholder.
const prompt = ChatPromptTemplate.fromMessages([
  ['system', barbarianPersona],
  new MessagesPlaceholder('messages'),
])
// Keeps the history sent to the model within a rough 4000-token budget by
// dropping the oldest messages first (system message is always retained).
const trimmer = trimMessages({
  strategy: 'last',
  maxTokens: 4000,
  // Crude heuristic: treat every message as exactly 4 tokens.
  tokenCounter: messageList => 4 * messageList.length,
  allowPartial: false,
  includeSystem: true,
  startOn: 'human',
})
const callModel = async (state: typeof MessagesAnnotation.State) => {
const trimmedMessages = await trimmer.invoke(state.messages)
const chain = prompt.pipe(llm) const response = await chain.invoke({ messages: trimmedMessages }, { signal: controller?.signal })
return { messages: [response] }
}
// Build the single-node conversation graph (START → model → END) and compile
// it with an in-memory checkpointer so history persists across turns.
const graph = new StateGraph(MessagesAnnotation)
  .addNode('model', callModel)
  .addEdge(START, 'model')
  .addEdge('model', END)
const checkpointer = new MemorySaver()
const app = graph.compile({ checkpointer })

// A fresh thread id scopes the checkpointed history to this script run.
const config = { configurable: { thread_id: uuid() } }

// Cross-handler state: abort handle for the in-flight request, plus a flag
// tracking whether a generation is currently running.
let controller: AbortController | null = null
let running = false
// Script Kit chat UI. Escape aborts an in-flight generation; submitting text
// runs one turn through the compiled graph (streaming output is handled by
// the llm callbacks above).
await chat({
  placeholder: 'Talk to Barbarian',
  onEscape: async () => {
    // Cancel the current generation, if any, and stop the dot animation.
    if (running) {
      controller?.abort()
      running = false
      clearInterval(id)
    }
  },
  onSubmit: async (input = '') => {
    if (!input) return
    running = true
    controller = new AbortController()
    try {
      const messages = [
        {
          role: 'user',
          content: input,
        },
      ]
      await app.invoke({ messages }, config)
    } catch (error) {
      // `catch` binds `unknown` under strict TS — narrow before reading
      // `.message` (the original `error.message` fails to compile with
      // useUnknownInCatchVariables). Abort errors land here too.
      const message = error instanceof Error ? error.message : String(error)
      console.log(`Error: ${message}`)
    } finally {
      running = false
    }
  },
})