feat: wire real Claude API — OAuth login + live chat via Wails bindings
All checks were successful
Build & Sign Wraith / Build Windows + Sign (push) Successful in 1m2s

Replace mock responses in the XO copilot panel with real Wails binding
calls to the Go AIService backend:

- StartLogin now opens the browser via pkg/browser.OpenURL
- SendMessage returns ChatResponse (text + tool call results) instead of
  bare error, fixing the tool-call accumulation bug in messageLoop
- Add GetModel/SetModel methods for frontend model switching
- Frontend useCopilot composable calls Go via Call.ByName from
  @wailsio/runtime, with conversation auto-creation, auth checks, and
  error display in the chat panel
- Store defaults to isAuthenticated=false; panel checks auth on mount
- CopilotSettings syncs model changes and logout to the backend

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
Vantz Stockwell 2026-03-17 10:22:07 -04:00
parent f22fbe14fa
commit fbd2fd4f80
5 changed files with 269 additions and 221 deletions

View File

@@ -50,7 +50,7 @@
</p> </p>
<button <button
class="px-4 py-2 text-sm font-medium text-white bg-[#1f6feb] hover:bg-[#388bfd] rounded transition-colors cursor-pointer" class="px-4 py-2 text-sm font-medium text-white bg-[#1f6feb] hover:bg-[#388bfd] rounded transition-colors cursor-pointer"
@click="store.startLogin()" @click="handleConnectLogin"
> >
Connect to Claude Connect to Claude
</button> </button>
@@ -126,14 +126,20 @@
</template> </template>
<script setup lang="ts"> <script setup lang="ts">
import { ref, computed, watch, nextTick } from "vue"; import { ref, computed, watch, nextTick, onMounted } from "vue";
import { useCopilotStore } from "@/stores/copilot.store"; import { useCopilotStore } from "@/stores/copilot.store";
import { useCopilot } from "@/composables/useCopilot"; import { useCopilot } from "@/composables/useCopilot";
import CopilotMessage from "./CopilotMessage.vue"; import CopilotMessage from "./CopilotMessage.vue";
import CopilotSettings from "./CopilotSettings.vue"; import CopilotSettings from "./CopilotSettings.vue";
const store = useCopilotStore(); const store = useCopilotStore();
const { processMessage } = useCopilot(); const { processMessage, checkAuth, startLogin, getModel } = useCopilot();
// Check auth status and load model on mount
onMounted(async () => {
await checkAuth();
await getModel();
});
const panelWidth = 320; const panelWidth = 320;
const inputText = ref(""); const inputText = ref("");
@@ -149,6 +155,12 @@ const modelShort = computed(() => {
return m; return m;
}); });
/** Trigger OAuth login from the inline panel button. */
async function handleConnectLogin(): Promise<void> {
await startLogin();
store.isAuthenticated = true;
}
/** Token formatter. */ /** Token formatter. */
function formatTokens(n: number): string { function formatTokens(n: number): string {
if (n >= 1000) return (n / 1000).toFixed(1) + "K"; if (n >= 1000) return (n / 1000).toFixed(1) + "K";

View File

@@ -96,19 +96,23 @@
</template> </template>
<script setup lang="ts"> <script setup lang="ts">
import { ref } from "vue"; import { ref, watch } from "vue";
import { useCopilotStore } from "@/stores/copilot.store"; import { useCopilotStore } from "@/stores/copilot.store";
import { useCopilot } from "@/composables/useCopilot";
const store = useCopilotStore(); const store = useCopilotStore();
const { startLogin, logout, setModel } = useCopilot();
const apiKey = ref(""); const apiKey = ref("");
const emit = defineEmits<{ const emit = defineEmits<{
(e: "close"): void; (e: "close"): void;
}>(); }>();
function handleLogin(): void { async function handleLogin(): Promise<void> {
// TODO: Wails AIService.StartLogin() await startLogin();
store.startLogin(); // Auth state will be updated when the OAuth callback completes
// and the panel re-checks on next interaction.
store.isAuthenticated = true;
emit("close"); emit("close");
} }
@@ -125,8 +129,8 @@ function handleClearHistory(): void {
emit("close"); emit("close");
} }
function handleDisconnect(): void { async function handleDisconnect(): Promise<void> {
store.disconnect(); await logout();
emit("close"); emit("close");
} }
@@ -134,4 +138,12 @@ function formatTokens(n: number): string {
if (n >= 1000) return (n / 1000).toFixed(1) + "K"; if (n >= 1000) return (n / 1000).toFixed(1) + "K";
return String(n); return String(n);
} }
// Sync model changes to the Go backend
watch(
() => store.model,
(newModel) => {
setModel(newModel);
},
);
</script> </script>

View File

@@ -1,177 +1,150 @@
import { useCopilotStore } from "@/stores/copilot.store"; import { useCopilotStore } from "@/stores/copilot.store";
import type { ToolCall } from "@/stores/copilot.store"; import type { ToolCall } from "@/stores/copilot.store";
import { Call } from "@wailsio/runtime";
/** /**
* Composable providing mock Wails binding wrappers for the AI copilot. * Fully qualified Go method name prefix for AIService bindings.
* Wails v3 ByName format: 'package.struct.method'
*/
const AI = "github.com/vstockwell/wraith/internal/ai.AIService";
/** Call a bound Go method on AIService by name. */
async function callAI<T = unknown>(method: string, ...args: unknown[]): Promise<T> {
return Call.ByName(`${AI}.${method}`, ...args) as Promise<T>;
}
/** Shape returned by Go AIService.SendMessage. */
interface ChatResponse {
text: string;
toolCalls?: {
name: string;
input: unknown;
result: unknown;
error?: string;
}[];
}
/**
* Composable providing real Wails binding wrappers for the AI copilot.
* *
* All functions simulate Claude-style behavior until real Wails bindings * Calls the Go AIService via Wails v3 Call.ByName. SendMessage blocks
* (AIService.*) are connected. * until the full response (including tool-use loops) is complete.
*/ */
export function useCopilot() { export function useCopilot() {
const store = useCopilotStore(); const store = useCopilotStore();
/** Simulate word-by-word streaming output. */
async function mockStream(
text: string,
onDelta: (word: string) => void,
): Promise<void> {
const words = text.split(" ");
for (const word of words) {
await new Promise((r) => setTimeout(r, 30 + Math.random() * 70));
onDelta(word + " ");
}
}
/** Simulate a tool call execution with realistic delay. */
async function mockToolCall(
name: string,
_input: Record<string, unknown>,
): Promise<unknown> {
await new Promise((r) => setTimeout(r, 500 + Math.random() * 1000));
if (name === "list_sessions") return [];
if (name === "terminal_read") {
return {
lines: [
"$ systemctl status nginx",
"● nginx.service - A high performance web server",
" Loaded: loaded (/lib/systemd/system/nginx.service; enabled)",
" Active: active (running) since Mon 2026-03-17 08:30:12 UTC",
" Main PID: 1234 (nginx)",
" Tasks: 5 (limit: 4915)",
" Memory: 12.4M",
],
};
}
if (name === "terminal_write") return { status: "ok" };
if (name === "rdp_screenshot") {
return { thumbnail: "data:image/jpeg;base64,/9j/4AAQ..." };
}
if (name === "rdp_click") return { status: "ok" };
if (name === "rdp_type") return { status: "ok" };
if (name === "sftp_read") return { content: "# example file content" };
if (name === "sftp_write") return { bytesWritten: 1024 };
return { status: "ok" };
}
/** /**
* Process a user message route to the appropriate mock response. * Process a user message by calling the real Go backend.
* * The backend blocks until the full response is ready (no streaming yet).
* Routes:
* - "hello"/"hey" -> greeting
* - "ssh"/"server"/"check" -> terminal tool calls
* - "rdp"/"desktop"/"screen" -> RDP screenshot tool call
* - default -> list_sessions assessment
*/ */
async function processMessage(text: string): Promise<void> { async function processMessage(text: string): Promise<void> {
const lower = text.toLowerCase();
store.isStreaming = true; store.isStreaming = true;
const assistantMsg = store.createAssistantMessage();
// Ensure we have a conversation
if (!store.activeConversationId) {
try {
const convId = await callAI<string>("NewConversation");
store.activeConversationId = convId;
} catch (err) {
store.messages.push({
id: `msg-${Date.now()}`,
role: "assistant",
content: `Error creating conversation: ${err}`,
timestamp: Date.now(),
});
store.isStreaming = false;
return;
}
}
try { try {
if (/\b(hello|hey|hi)\b/.test(lower)) { const response = await callAI<ChatResponse>(
await mockStream( "SendMessage",
"XO online. I have access to your active sessions. What's the mission, Commander?", store.activeConversationId,
(word) => store.appendStreamDelta(word), text,
);
} else if (/\b(ssh|server|check|nginx|status)\b/.test(lower)) {
await mockStream(
"On it. Let me check the server status.",
(word) => store.appendStreamDelta(word),
);
// Tool call 1: terminal_write
const writeCallId = `tc-${Date.now()}-1`;
const writeCall: ToolCall = {
id: writeCallId,
name: "terminal_write",
input: { sessionId: "s1", text: "systemctl status nginx" },
status: "pending",
};
store.addToolCall(writeCall);
const writeResult = await mockToolCall("terminal_write", writeCall.input);
store.completeToolCall(writeCallId, writeResult);
// Tool call 2: terminal_read
const readCallId = `tc-${Date.now()}-2`;
const readCall: ToolCall = {
id: readCallId,
name: "terminal_read",
input: { sessionId: "s1", lines: 20 },
status: "pending",
};
store.addToolCall(readCall);
const readResult = await mockToolCall("terminal_read", readCall.input);
store.completeToolCall(readCallId, readResult);
// Summary
store.appendStreamDelta("\n\n");
await mockStream(
"Nginx is **active and running**. The service has been up since 08:30 UTC today, using 12.4M of memory with 5 active tasks. Everything looks healthy.",
(word) => store.appendStreamDelta(word),
);
} else if (/\b(rdp|desktop|screen|screenshot)\b/.test(lower)) {
await mockStream(
"Taking a screenshot of the remote desktop.",
(word) => store.appendStreamDelta(word),
);
// Tool call: rdp_screenshot
const callId = `tc-${Date.now()}`;
const call: ToolCall = {
id: callId,
name: "rdp_screenshot",
input: { sessionId: "s2" },
status: "pending",
};
store.addToolCall(call);
const result = await mockToolCall("rdp_screenshot", call.input);
store.completeToolCall(callId, result);
store.appendStreamDelta("\n\n");
await mockStream(
"I can see the Windows desktop. The screen shows the default wallpaper with a few application shortcuts. No error dialogs or unusual activity detected.",
(word) => store.appendStreamDelta(word),
);
} else {
await mockStream(
"Understood. Let me assess the situation.",
(word) => store.appendStreamDelta(word),
);
// Tool call: list_sessions
const callId = `tc-${Date.now()}`;
const call: ToolCall = {
id: callId,
name: "list_sessions",
input: {},
status: "pending",
};
store.addToolCall(call);
const result = await mockToolCall("list_sessions", call.input);
store.completeToolCall(callId, result);
store.appendStreamDelta("\n\n");
await mockStream(
"I've reviewed your current sessions. No active connections detected at the moment. Would you like me to connect to a specific server or run a diagnostic?",
(word) => store.appendStreamDelta(word),
);
}
// Track mock output tokens
store.tokenUsage.output += Math.ceil(
(assistantMsg.content.length) / 4,
); );
// Build the assistant message from the response
const toolCalls: ToolCall[] | undefined = response.toolCalls?.map(
(tc) => ({
id: `tc-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
name: tc.name,
input: (tc.input ?? {}) as Record<string, unknown>,
result: tc.result,
status: (tc.error ? "error" : "done") as "done" | "error",
}),
);
store.messages.push({
id: `msg-${Date.now()}`,
role: "assistant",
content: response.text || "",
toolCalls: toolCalls,
timestamp: Date.now(),
});
} catch (err) {
store.messages.push({
id: `msg-${Date.now()}`,
role: "assistant",
content: `Error: ${err}`,
timestamp: Date.now(),
});
} finally { } finally {
store.isStreaming = false; store.isStreaming = false;
} }
} }
return { mockStream, mockToolCall, processMessage }; /** Begin the OAuth login flow (opens browser). */
async function startLogin(): Promise<void> {
try {
await callAI("StartLogin");
} catch (err) {
console.error("StartLogin failed:", err);
}
}
/** Check whether the user is authenticated. */
async function checkAuth(): Promise<boolean> {
try {
const authed = await callAI<boolean>("IsAuthenticated");
store.isAuthenticated = authed;
return authed;
} catch {
return false;
}
}
/** Log out and clear tokens. */
async function logout(): Promise<void> {
try {
await callAI("Logout");
store.isAuthenticated = false;
store.clearHistory();
} catch (err) {
console.error("Logout failed:", err);
}
}
/** Sync the model setting to the Go backend. */
async function setModel(model: string): Promise<void> {
try {
await callAI("SetModel", model);
store.model = model;
} catch (err) {
console.error("SetModel failed:", err);
}
}
/** Load the current model from the Go backend. */
async function getModel(): Promise<string> {
try {
const m = await callAI<string>("GetModel");
store.model = m;
return m;
} catch {
return store.model;
}
}
return { processMessage, startLogin, checkAuth, logout, setModel, getModel };
} }

View File

@@ -30,16 +30,17 @@ export interface ConversationSummary {
* Copilot (XO) store. * Copilot (XO) store.
* Manages the AI assistant panel state, messages, and streaming. * Manages the AI assistant panel state, messages, and streaming.
* *
* All Wails AIService calls are TODOs with mock behavior for now. * Backend calls are handled by the useCopilot composable;
* this store manages purely reactive UI state.
*/ */
export const useCopilotStore = defineStore("copilot", () => { export const useCopilotStore = defineStore("copilot", () => {
const isPanelOpen = ref(false); const isPanelOpen = ref(false);
const isAuthenticated = ref(true); // default true for mock const isAuthenticated = ref(false);
const isStreaming = ref(false); const isStreaming = ref(false);
const activeConversationId = ref<string | null>(null); const activeConversationId = ref<string | null>(null);
const messages = ref<Message[]>([]); const messages = ref<Message[]>([]);
const conversations = ref<ConversationSummary[]>([]); const conversations = ref<ConversationSummary[]>([]);
const model = ref("claude-sonnet-4-5-20250514"); const model = ref("claude-sonnet-4-20250514");
const tokenUsage = ref({ input: 0, output: 0 }); const tokenUsage = ref({ input: 0, output: 0 });
const showSettings = ref(false); const showSettings = ref(false);
@@ -48,21 +49,14 @@ export const useCopilotStore = defineStore("copilot", () => {
isPanelOpen.value = !isPanelOpen.value; isPanelOpen.value = !isPanelOpen.value;
} }
/** Start OAuth login flow. */ /** Start a new conversation (resets local state). */
function startLogin(): void {
// TODO: Wails AIService.StartLogin()
isAuthenticated.value = true;
}
/** Start a new conversation. */
function newConversation(): void { function newConversation(): void {
// TODO: Wails AIService.NewConversation() activeConversationId.value = null;
activeConversationId.value = `conv-${Date.now()}`;
messages.value = []; messages.value = [];
tokenUsage.value = { input: 0, output: 0 }; tokenUsage.value = { input: 0, output: 0 };
} }
/** Send a user message and trigger a mock XO response. */ /** Add a user message to the local message list. */
function sendMessage(text: string): void { function sendMessage(text: string): void {
const userMsg: Message = { const userMsg: Message = {
id: `msg-${Date.now()}`, id: `msg-${Date.now()}`,
@@ -72,12 +66,8 @@ export const useCopilotStore = defineStore("copilot", () => {
}; };
messages.value.push(userMsg); messages.value.push(userMsg);
// Track mock input tokens // Rough token estimate for display purposes
tokenUsage.value.input += Math.ceil(text.length / 4); tokenUsage.value.input += Math.ceil(text.length / 4);
if (!activeConversationId.value) {
activeConversationId.value = `conv-${Date.now()}`;
}
} }
/** Append a streaming text delta to the latest assistant message. */ /** Append a streaming text delta to the latest assistant message. */
@@ -138,7 +128,7 @@ export const useCopilotStore = defineStore("copilot", () => {
/** Load conversation list from backend. */ /** Load conversation list from backend. */
async function loadConversations(): Promise<void> { async function loadConversations(): Promise<void> {
// TODO: Wails AIService.ListConversations() // TODO: wire to AIService.ListConversations when conversation history UI is built
conversations.value = []; conversations.value = [];
} }
@@ -149,13 +139,6 @@ export const useCopilotStore = defineStore("copilot", () => {
tokenUsage.value = { input: 0, output: 0 }; tokenUsage.value = { input: 0, output: 0 };
} }
/** Disconnect from Claude (reset auth state). */
function disconnect(): void {
// TODO: Wails AIService.Disconnect()
isAuthenticated.value = false;
clearHistory();
}
return { return {
isPanelOpen, isPanelOpen,
isAuthenticated, isAuthenticated,
@@ -167,7 +150,6 @@ export const useCopilotStore = defineStore("copilot", () => {
tokenUsage, tokenUsage,
showSettings, showSettings,
togglePanel, togglePanel,
startLogin,
newConversation, newConversation,
sendMessage, sendMessage,
appendStreamDelta, appendStreamDelta,
@@ -177,6 +159,5 @@ export const useCopilotStore = defineStore("copilot", () => {
failToolCall, failToolCall,
loadConversations, loadConversations,
clearHistory, clearHistory,
disconnect,
}; };
}); });

View File

@@ -5,6 +5,8 @@ import (
"fmt" "fmt"
"log/slog" "log/slog"
"sync" "sync"
"github.com/pkg/browser"
) )
// SystemPrompt is the system prompt given to Claude for copilot interactions. // SystemPrompt is the system prompt given to Claude for copilot interactions.
@@ -46,11 +48,25 @@ func NewAIService(oauth *OAuthManager, router *ToolRouter, convMgr *Conversation
} }
} }
// ChatResponse is returned to the frontend after a complete AI turn.
type ChatResponse struct {
Text string `json:"text"`
ToolCalls []ToolCallResult `json:"toolCalls,omitempty"`
}
// ToolCallResult captures a single tool invocation and its outcome.
type ToolCallResult struct {
Name string `json:"name"`
Input interface{} `json:"input"`
Result interface{} `json:"result"`
Error string `json:"error,omitempty"`
}
// --- Auth --- // --- Auth ---
// StartLogin begins the OAuth PKCE flow, opening the browser for authentication. // StartLogin begins the OAuth PKCE flow, opening the browser for authentication.
func (s *AIService) StartLogin() error { func (s *AIService) StartLogin() error {
done, err := s.oauth.StartLogin(nil) // nil openURL = no browser auto-open done, err := s.oauth.StartLogin(browser.OpenURL)
if err != nil { if err != nil {
return err return err
} }
@@ -101,7 +117,8 @@ func (s *AIService) DeleteConversation(id string) error {
// SendMessage sends a user message in a conversation and processes the AI response. // SendMessage sends a user message in a conversation and processes the AI response.
// Tool calls are automatically dispatched and results fed back to the model. // Tool calls are automatically dispatched and results fed back to the model.
// This method blocks until the full response (including any tool use loops) is complete. // This method blocks until the full response (including any tool use loops) is complete.
func (s *AIService) SendMessage(conversationId, text string) error { // It returns the aggregated text and tool-call results from all iterations.
func (s *AIService) SendMessage(conversationId, text string) (*ChatResponse, error) {
// Add user message to conversation // Add user message to conversation
userMsg := Message{ userMsg := Message{
Role: "user", Role: "user",
@@ -110,7 +127,7 @@ func (s *AIService) SendMessage(conversationId, text string) error {
}, },
} }
if err := s.conversations.AddMessage(conversationId, userMsg); err != nil { if err := s.conversations.AddMessage(conversationId, userMsg); err != nil {
return fmt.Errorf("store user message: %w", err) return nil, fmt.Errorf("store user message: %w", err)
} }
// Run the message loop (handles tool use) // Run the message loop (handles tool use)
@@ -118,21 +135,26 @@
} }
// messageLoop sends the conversation to Claude and handles tool use loops. // messageLoop sends the conversation to Claude and handles tool use loops.
func (s *AIService) messageLoop(conversationId string) error { // It returns the aggregated ChatResponse containing all text and tool-call results.
func (s *AIService) messageLoop(conversationId string) (*ChatResponse, error) {
resp := &ChatResponse{}
for iterations := 0; iterations < 20; iterations++ { // safety limit for iterations := 0; iterations < 20; iterations++ { // safety limit
messages, err := s.conversations.GetMessages(conversationId) messages, err := s.conversations.GetMessages(conversationId)
if err != nil { if err != nil {
return err return nil, err
} }
ch, err := s.client.SendMessage(messages, CopilotTools, SystemPrompt) ch, err := s.client.SendMessage(messages, CopilotTools, SystemPrompt)
if err != nil { if err != nil {
return fmt.Errorf("send to claude: %w", err) return nil, fmt.Errorf("send to claude: %w", err)
} }
// Collect the response // Collect the response
var textParts []string var textParts []string
var toolCalls []ContentBlock var toolCalls []ContentBlock
var currentToolID string
var currentToolName string
var currentToolInput string var currentToolInput string
for event := range ch { for event := range ch {
@@ -140,10 +162,33 @@ func (s *AIService) messageLoop(conversationId string) error {
case "text_delta": case "text_delta":
textParts = append(textParts, event.Data) textParts = append(textParts, event.Data)
case "tool_use_start": case "tool_use_start":
// Finalize any previous tool call
if currentToolID != "" {
toolCalls = append(toolCalls, ContentBlock{
Type: "tool_use",
ID: currentToolID,
Name: currentToolName,
Input: json.RawMessage(currentToolInput),
})
}
currentToolID = event.ToolID
currentToolName = event.ToolName
currentToolInput = "" currentToolInput = ""
case "tool_use_delta": case "tool_use_delta":
currentToolInput += event.Data currentToolInput += event.Data
case "done": case "done":
// Finalize any in-progress tool call
if currentToolID != "" {
toolCalls = append(toolCalls, ContentBlock{
Type: "tool_use",
ID: currentToolID,
Name: currentToolName,
Input: json.RawMessage(currentToolInput),
})
currentToolID = ""
currentToolName = ""
currentToolInput = ""
}
// Parse usage if available // Parse usage if available
var delta struct { var delta struct {
Usage Usage `json:"usage"` Usage Usage `json:"usage"`
@@ -152,34 +197,39 @@ func (s *AIService) messageLoop(conversationId string) error {
s.conversations.UpdateTokenUsage(conversationId, delta.Usage.InputTokens, delta.Usage.OutputTokens) s.conversations.UpdateTokenUsage(conversationId, delta.Usage.InputTokens, delta.Usage.OutputTokens)
} }
case "error": case "error":
return fmt.Errorf("stream error: %s", event.Data) return nil, fmt.Errorf("stream error: %s", event.Data)
} }
}
// When a tool_use block completes, we need to check content_block_stop // Finalize any trailing tool call (if stream ended without a "done" event)
// But since we accumulate, we'll finalize after the stream ends if currentToolID != "" {
_ = toolCalls // kept for the final assembly below toolCalls = append(toolCalls, ContentBlock{
Type: "tool_use",
ID: currentToolID,
Name: currentToolName,
Input: json.RawMessage(currentToolInput),
})
} }
// Build the assistant message // Build the assistant message
var assistantContent []ContentBlock var assistantContent []ContentBlock
if len(textParts) > 0 { fullText := ""
fullText := "" for _, p := range textParts {
for _, p := range textParts { fullText += p
fullText += p }
} if fullText != "" {
if fullText != "" { assistantContent = append(assistantContent, ContentBlock{
assistantContent = append(assistantContent, ContentBlock{ Type: "text",
Type: "text", Text: fullText,
Text: fullText, })
}) resp.Text += fullText
}
} }
for _, tc := range toolCalls { for _, tc := range toolCalls {
assistantContent = append(assistantContent, tc) assistantContent = append(assistantContent, tc)
} }
if len(assistantContent) == 0 { if len(assistantContent) == 0 {
return nil // empty response return resp, nil // empty response
} }
// Store assistant message // Store assistant message
@@ -188,7 +238,7 @@ func (s *AIService) messageLoop(conversationId string) error {
Content: assistantContent, Content: assistantContent,
} }
if err := s.conversations.AddMessage(conversationId, assistantMsg); err != nil { if err := s.conversations.AddMessage(conversationId, assistantMsg); err != nil {
return fmt.Errorf("store assistant message: %w", err) return nil, fmt.Errorf("store assistant message: %w", err)
} }
// If there were tool calls, dispatch them and continue the loop // If there were tool calls, dispatch them and continue the loop
@@ -201,7 +251,7 @@ func (s *AIService) messageLoop(conversationId string) error {
} }
if !hasToolUse { if !hasToolUse {
return nil // done, no tool use to process return resp, nil // done, no tool use to process
} }
// Dispatch tool calls and create tool_result message // Dispatch tool calls and create tool_result message
@@ -211,24 +261,32 @@ func (s *AIService) messageLoop(conversationId string) error {
continue continue
} }
result, err := s.router.Dispatch(block.Name, block.Input) result, dispatchErr := s.router.Dispatch(block.Name, block.Input)
resultBlock := ContentBlock{ resultBlock := ContentBlock{
Type: "tool_result", Type: "tool_result",
ToolUseID: block.ID, ToolUseID: block.ID,
} }
if err != nil { tcResult := ToolCallResult{
Name: block.Name,
Input: block.Input,
}
if dispatchErr != nil {
resultBlock.IsError = true resultBlock.IsError = true
resultBlock.Content = []ContentBlock{ resultBlock.Content = []ContentBlock{
{Type: "text", Text: err.Error()}, {Type: "text", Text: dispatchErr.Error()},
} }
tcResult.Error = dispatchErr.Error()
} else { } else {
resultJSON, _ := json.Marshal(result) resultJSON, _ := json.Marshal(result)
resultBlock.Content = []ContentBlock{ resultBlock.Content = []ContentBlock{
{Type: "text", Text: string(resultJSON)}, {Type: "text", Text: string(resultJSON)},
} }
tcResult.Result = result
} }
toolResults = append(toolResults, resultBlock) toolResults = append(toolResults, resultBlock)
resp.ToolCalls = append(resp.ToolCalls, tcResult)
} }
toolResultMsg := Message{ toolResultMsg := Message{
@@ -236,13 +294,25 @@ func (s *AIService) messageLoop(conversationId string) error {
Content: toolResults, Content: toolResults,
} }
if err := s.conversations.AddMessage(conversationId, toolResultMsg); err != nil { if err := s.conversations.AddMessage(conversationId, toolResultMsg); err != nil {
return fmt.Errorf("store tool results: %w", err) return nil, fmt.Errorf("store tool results: %w", err)
} }
// Continue the loop to let Claude process tool results // Continue the loop to let Claude process tool results
} }
return fmt.Errorf("exceeded maximum tool use iterations") return nil, fmt.Errorf("exceeded maximum tool use iterations")
}
// --- Model ---
// GetModel returns the current Claude model identifier.
func (s *AIService) GetModel() string {
return s.client.model
}
// SetModel changes the Claude model used for subsequent requests.
func (s *AIService) SetModel(model string) {
s.client.model = model
} }
// --- Terminal Buffer Management --- // --- Terminal Buffer Management ---