From fbd2fd4f8022b5ebe3e4fc88085d6a4d70b1c06f Mon Sep 17 00:00:00 2001
From: Vantz Stockwell
Date: Tue, 17 Mar 2026 10:22:07 -0400
Subject: [PATCH] =?UTF-8?q?feat:=20wire=20real=20Claude=20API=20=E2=80=94?=
=?UTF-8?q?=20OAuth=20login=20+=20live=20chat=20via=20Wails=20bindings?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
Replace mock responses in the XO copilot panel with real Wails binding
calls to the Go AIService backend:
- StartLogin now opens the browser via pkg/browser.OpenURL
- SendMessage returns ChatResponse (text + tool call results) instead of
bare error, fixing the tool-call accumulation bug in messageLoop
- Add GetModel/SetModel methods for frontend model switching
- Frontend useCopilot composable calls Go via Call.ByName from
@wailsio/runtime, with conversation auto-creation, auth checks, and
error display in the chat panel
- Store defaults to isAuthenticated=false; panel checks auth on mount
- CopilotSettings syncs model changes and logout to the backend
Co-Authored-By: Claude Opus 4.6 (1M context)
---
.../src/components/copilot/CopilotPanel.vue | 18 +-
.../components/copilot/CopilotSettings.vue | 24 +-
frontend/src/composables/useCopilot.ts | 283 ++++++++----------
frontend/src/stores/copilot.store.ts | 37 +--
internal/ai/service.go | 128 ++++++--
5 files changed, 269 insertions(+), 221 deletions(-)
diff --git a/frontend/src/components/copilot/CopilotPanel.vue b/frontend/src/components/copilot/CopilotPanel.vue
index 70c9fae..0c50104 100644
--- a/frontend/src/components/copilot/CopilotPanel.vue
+++ b/frontend/src/components/copilot/CopilotPanel.vue
@@ -50,7 +50,7 @@
@@ -126,14 +126,20 @@
diff --git a/frontend/src/composables/useCopilot.ts b/frontend/src/composables/useCopilot.ts
index 120d3f2..ebee827 100644
--- a/frontend/src/composables/useCopilot.ts
+++ b/frontend/src/composables/useCopilot.ts
@@ -1,177 +1,150 @@
import { useCopilotStore } from "@/stores/copilot.store";
import type { ToolCall } from "@/stores/copilot.store";
+import { Call } from "@wailsio/runtime";
/**
- * Composable providing mock Wails binding wrappers for the AI copilot.
+ * Fully qualified Go method name prefix for AIService bindings.
+ * Wails v3 ByName format: 'package.struct.method'
+ */
+const AI = "github.com/vstockwell/wraith/internal/ai.AIService";
+
+/** Call a bound Go method on AIService by name. */
+async function callAI<T = unknown>(method: string, ...args: unknown[]): Promise<T> {
+ return Call.ByName(`${AI}.${method}`, ...args) as Promise<T>;
+}
+
+/** Shape returned by Go AIService.SendMessage. */
+interface ChatResponse {
+ text: string;
+ toolCalls?: {
+ name: string;
+ input: unknown;
+ result: unknown;
+ error?: string;
+ }[];
+}
+
+/**
+ * Composable providing real Wails binding wrappers for the AI copilot.
*
- * All functions simulate Claude-style behavior until real Wails bindings
- * (AIService.*) are connected.
+ * Calls the Go AIService via Wails v3 Call.ByName. SendMessage blocks
+ * until the full response (including tool-use loops) is complete.
*/
export function useCopilot() {
const store = useCopilotStore();
- /** Simulate word-by-word streaming output. */
- async function mockStream(
- text: string,
- onDelta: (word: string) => void,
- ): Promise<void> {
- const words = text.split(" ");
- for (const word of words) {
- await new Promise((r) => setTimeout(r, 30 + Math.random() * 70));
- onDelta(word + " ");
- }
- }
-
- /** Simulate a tool call execution with realistic delay. */
- async function mockToolCall(
- name: string,
- _input: Record<string, unknown>,
- ): Promise<unknown> {
- await new Promise((r) => setTimeout(r, 500 + Math.random() * 1000));
-
- if (name === "list_sessions") return [];
- if (name === "terminal_read") {
- return {
- lines: [
- "$ systemctl status nginx",
- "● nginx.service - A high performance web server",
- " Loaded: loaded (/lib/systemd/system/nginx.service; enabled)",
- " Active: active (running) since Mon 2026-03-17 08:30:12 UTC",
- " Main PID: 1234 (nginx)",
- " Tasks: 5 (limit: 4915)",
- " Memory: 12.4M",
- ],
- };
- }
- if (name === "terminal_write") return { status: "ok" };
- if (name === "rdp_screenshot") {
- return { thumbnail: "data:image/jpeg;base64,/9j/4AAQ..." };
- }
- if (name === "rdp_click") return { status: "ok" };
- if (name === "rdp_type") return { status: "ok" };
- if (name === "sftp_read") return { content: "# example file content" };
- if (name === "sftp_write") return { bytesWritten: 1024 };
-
- return { status: "ok" };
- }
-
/**
- * Process a user message — route to the appropriate mock response.
- *
- * Routes:
- * - "hello"/"hey" -> greeting
- * - "ssh"/"server"/"check" -> terminal tool calls
- * - "rdp"/"desktop"/"screen" -> RDP screenshot tool call
- * - default -> list_sessions assessment
+ * Process a user message by calling the real Go backend.
+ * The backend blocks until the full response is ready (no streaming yet).
*/
async function processMessage(text: string): Promise<void> {
- const lower = text.toLowerCase();
-
store.isStreaming = true;
- const assistantMsg = store.createAssistantMessage();
+
+ // Ensure we have a conversation
+ if (!store.activeConversationId) {
+ try {
+ const convId = await callAI<string>("NewConversation");
+ store.activeConversationId = convId;
+ } catch (err) {
+ store.messages.push({
+ id: `msg-${Date.now()}`,
+ role: "assistant",
+ content: `Error creating conversation: ${err}`,
+ timestamp: Date.now(),
+ });
+ store.isStreaming = false;
+ return;
+ }
+ }
try {
- if (/\b(hello|hey|hi)\b/.test(lower)) {
- await mockStream(
- "XO online. I have access to your active sessions. What's the mission, Commander?",
- (word) => store.appendStreamDelta(word),
- );
- } else if (/\b(ssh|server|check|nginx|status)\b/.test(lower)) {
- await mockStream(
- "On it. Let me check the server status.",
- (word) => store.appendStreamDelta(word),
- );
-
- // Tool call 1: terminal_write
- const writeCallId = `tc-${Date.now()}-1`;
- const writeCall: ToolCall = {
- id: writeCallId,
- name: "terminal_write",
- input: { sessionId: "s1", text: "systemctl status nginx" },
- status: "pending",
- };
- store.addToolCall(writeCall);
-
- const writeResult = await mockToolCall("terminal_write", writeCall.input);
- store.completeToolCall(writeCallId, writeResult);
-
- // Tool call 2: terminal_read
- const readCallId = `tc-${Date.now()}-2`;
- const readCall: ToolCall = {
- id: readCallId,
- name: "terminal_read",
- input: { sessionId: "s1", lines: 20 },
- status: "pending",
- };
- store.addToolCall(readCall);
-
- const readResult = await mockToolCall("terminal_read", readCall.input);
- store.completeToolCall(readCallId, readResult);
-
- // Summary
- store.appendStreamDelta("\n\n");
- await mockStream(
- "Nginx is **active and running**. The service has been up since 08:30 UTC today, using 12.4M of memory with 5 active tasks. Everything looks healthy.",
- (word) => store.appendStreamDelta(word),
- );
- } else if (/\b(rdp|desktop|screen|screenshot)\b/.test(lower)) {
- await mockStream(
- "Taking a screenshot of the remote desktop.",
- (word) => store.appendStreamDelta(word),
- );
-
- // Tool call: rdp_screenshot
- const callId = `tc-${Date.now()}`;
- const call: ToolCall = {
- id: callId,
- name: "rdp_screenshot",
- input: { sessionId: "s2" },
- status: "pending",
- };
- store.addToolCall(call);
-
- const result = await mockToolCall("rdp_screenshot", call.input);
- store.completeToolCall(callId, result);
-
- store.appendStreamDelta("\n\n");
- await mockStream(
- "I can see the Windows desktop. The screen shows the default wallpaper with a few application shortcuts. No error dialogs or unusual activity detected.",
- (word) => store.appendStreamDelta(word),
- );
- } else {
- await mockStream(
- "Understood. Let me assess the situation.",
- (word) => store.appendStreamDelta(word),
- );
-
- // Tool call: list_sessions
- const callId = `tc-${Date.now()}`;
- const call: ToolCall = {
- id: callId,
- name: "list_sessions",
- input: {},
- status: "pending",
- };
- store.addToolCall(call);
-
- const result = await mockToolCall("list_sessions", call.input);
- store.completeToolCall(callId, result);
-
- store.appendStreamDelta("\n\n");
- await mockStream(
- "I've reviewed your current sessions. No active connections detected at the moment. Would you like me to connect to a specific server or run a diagnostic?",
- (word) => store.appendStreamDelta(word),
- );
- }
-
- // Track mock output tokens
- store.tokenUsage.output += Math.ceil(
- (assistantMsg.content.length) / 4,
+ const response = await callAI<ChatResponse>(
+ "SendMessage",
+ store.activeConversationId,
+ text,
);
+
+ // Build the assistant message from the response
+ const toolCalls: ToolCall[] | undefined = response.toolCalls?.map(
+ (tc) => ({
+ id: `tc-${Date.now()}-${Math.random().toString(36).slice(2, 8)}`,
+ name: tc.name,
+ input: (tc.input ?? {}) as Record<string, unknown>,
+ result: tc.result,
+ status: (tc.error ? "error" : "done") as "done" | "error",
+ }),
+ );
+
+ store.messages.push({
+ id: `msg-${Date.now()}`,
+ role: "assistant",
+ content: response.text || "",
+ toolCalls: toolCalls,
+ timestamp: Date.now(),
+ });
+ } catch (err) {
+ store.messages.push({
+ id: `msg-${Date.now()}`,
+ role: "assistant",
+ content: `Error: ${err}`,
+ timestamp: Date.now(),
+ });
} finally {
store.isStreaming = false;
}
}
- return { mockStream, mockToolCall, processMessage };
+ /** Begin the OAuth login flow (opens browser). */
+ async function startLogin(): Promise<void> {
+ try {
+ await callAI("StartLogin");
+ } catch (err) {
+ console.error("StartLogin failed:", err);
+ }
+ }
+
+ /** Check whether the user is authenticated. */
+ async function checkAuth(): Promise<boolean> {
+ try {
+ const authed = await callAI<boolean>("IsAuthenticated");
+ store.isAuthenticated = authed;
+ return authed;
+ } catch {
+ return false;
+ }
+ }
+
+ /** Log out and clear tokens. */
+ async function logout(): Promise<void> {
+ try {
+ await callAI("Logout");
+ store.isAuthenticated = false;
+ store.clearHistory();
+ } catch (err) {
+ console.error("Logout failed:", err);
+ }
+ }
+
+ /** Sync the model setting to the Go backend. */
+ async function setModel(model: string): Promise<void> {
+ try {
+ await callAI("SetModel", model);
+ store.model = model;
+ } catch (err) {
+ console.error("SetModel failed:", err);
+ }
+ }
+
+ /** Load the current model from the Go backend. */
+ async function getModel(): Promise<string> {
+ try {
+ const m = await callAI<string>("GetModel");
+ store.model = m;
+ return m;
+ } catch {
+ return store.model;
+ }
+ }
+
+ return { processMessage, startLogin, checkAuth, logout, setModel, getModel };
}
diff --git a/frontend/src/stores/copilot.store.ts b/frontend/src/stores/copilot.store.ts
index 0272e7f..f2e926c 100644
--- a/frontend/src/stores/copilot.store.ts
+++ b/frontend/src/stores/copilot.store.ts
@@ -30,16 +30,17 @@ export interface ConversationSummary {
* Copilot (XO) store.
* Manages the AI assistant panel state, messages, and streaming.
*
- * All Wails AIService calls are TODOs with mock behavior for now.
+ * Backend calls are handled by the useCopilot composable;
+ * this store manages purely reactive UI state.
*/
export const useCopilotStore = defineStore("copilot", () => {
const isPanelOpen = ref(false);
- const isAuthenticated = ref(true); // default true for mock
+ const isAuthenticated = ref(false);
const isStreaming = ref(false);
const activeConversationId = ref<string | null>(null);
const messages = ref<Message[]>([]);
const conversations = ref<ConversationSummary[]>([]);
- const model = ref("claude-sonnet-4-5-20250514");
+ const model = ref("claude-sonnet-4-20250514");
const tokenUsage = ref({ input: 0, output: 0 });
const showSettings = ref(false);
@@ -48,21 +49,14 @@ export const useCopilotStore = defineStore("copilot", () => {
isPanelOpen.value = !isPanelOpen.value;
}
- /** Start OAuth login flow. */
- function startLogin(): void {
- // TODO: Wails AIService.StartLogin()
- isAuthenticated.value = true;
- }
-
- /** Start a new conversation. */
+ /** Start a new conversation (resets local state). */
function newConversation(): void {
- // TODO: Wails AIService.NewConversation()
- activeConversationId.value = `conv-${Date.now()}`;
+ activeConversationId.value = null;
messages.value = [];
tokenUsage.value = { input: 0, output: 0 };
}
- /** Send a user message and trigger a mock XO response. */
+ /** Add a user message to the local message list. */
function sendMessage(text: string): void {
const userMsg: Message = {
id: `msg-${Date.now()}`,
@@ -72,12 +66,8 @@ export const useCopilotStore = defineStore("copilot", () => {
};
messages.value.push(userMsg);
- // Track mock input tokens
+ // Rough token estimate for display purposes
tokenUsage.value.input += Math.ceil(text.length / 4);
-
- if (!activeConversationId.value) {
- activeConversationId.value = `conv-${Date.now()}`;
- }
}
/** Append a streaming text delta to the latest assistant message. */
@@ -138,7 +128,7 @@ export const useCopilotStore = defineStore("copilot", () => {
/** Load conversation list from backend. */
async function loadConversations(): Promise<void> {
- // TODO: Wails AIService.ListConversations()
+ // TODO: wire to AIService.ListConversations when conversation history UI is built
conversations.value = [];
}
@@ -149,13 +139,6 @@ export const useCopilotStore = defineStore("copilot", () => {
tokenUsage.value = { input: 0, output: 0 };
}
- /** Disconnect from Claude (reset auth state). */
- function disconnect(): void {
- // TODO: Wails AIService.Disconnect()
- isAuthenticated.value = false;
- clearHistory();
- }
-
return {
isPanelOpen,
isAuthenticated,
@@ -167,7 +150,6 @@ export const useCopilotStore = defineStore("copilot", () => {
tokenUsage,
showSettings,
togglePanel,
- startLogin,
newConversation,
sendMessage,
appendStreamDelta,
@@ -177,6 +159,5 @@ export const useCopilotStore = defineStore("copilot", () => {
failToolCall,
loadConversations,
clearHistory,
- disconnect,
};
});
diff --git a/internal/ai/service.go b/internal/ai/service.go
index 1c18b22..7f949ff 100644
--- a/internal/ai/service.go
+++ b/internal/ai/service.go
@@ -5,6 +5,8 @@ import (
"fmt"
"log/slog"
"sync"
+
+ "github.com/pkg/browser"
)
// SystemPrompt is the system prompt given to Claude for copilot interactions.
@@ -46,11 +48,25 @@ func NewAIService(oauth *OAuthManager, router *ToolRouter, convMgr *Conversation
}
}
+// ChatResponse is returned to the frontend after a complete AI turn.
+type ChatResponse struct {
+ Text string `json:"text"`
+ ToolCalls []ToolCallResult `json:"toolCalls,omitempty"`
+}
+
+// ToolCallResult captures a single tool invocation and its outcome.
+type ToolCallResult struct {
+ Name string `json:"name"`
+ Input interface{} `json:"input"`
+ Result interface{} `json:"result"`
+ Error string `json:"error,omitempty"`
+}
+
// --- Auth ---
// StartLogin begins the OAuth PKCE flow, opening the browser for authentication.
func (s *AIService) StartLogin() error {
- done, err := s.oauth.StartLogin(nil) // nil openURL = no browser auto-open
+ done, err := s.oauth.StartLogin(browser.OpenURL)
if err != nil {
return err
}
@@ -101,7 +117,8 @@ func (s *AIService) DeleteConversation(id string) error {
// SendMessage sends a user message in a conversation and processes the AI response.
// Tool calls are automatically dispatched and results fed back to the model.
// This method blocks until the full response (including any tool use loops) is complete.
-func (s *AIService) SendMessage(conversationId, text string) error {
+// It returns the aggregated text and tool-call results from all iterations.
+func (s *AIService) SendMessage(conversationId, text string) (*ChatResponse, error) {
// Add user message to conversation
userMsg := Message{
Role: "user",
@@ -110,7 +127,7 @@ func (s *AIService) SendMessage(conversationId, text string) error {
},
}
if err := s.conversations.AddMessage(conversationId, userMsg); err != nil {
- return fmt.Errorf("store user message: %w", err)
+ return nil, fmt.Errorf("store user message: %w", err)
}
// Run the message loop (handles tool use)
@@ -118,21 +135,26 @@ func (s *AIService) SendMessage(conversationId, text string) error {
}
// messageLoop sends the conversation to Claude and handles tool use loops.
-func (s *AIService) messageLoop(conversationId string) error {
+// It returns the aggregated ChatResponse containing all text and tool-call results.
+func (s *AIService) messageLoop(conversationId string) (*ChatResponse, error) {
+ resp := &ChatResponse{}
+
for iterations := 0; iterations < 20; iterations++ { // safety limit
messages, err := s.conversations.GetMessages(conversationId)
if err != nil {
- return err
+ return nil, err
}
ch, err := s.client.SendMessage(messages, CopilotTools, SystemPrompt)
if err != nil {
- return fmt.Errorf("send to claude: %w", err)
+ return nil, fmt.Errorf("send to claude: %w", err)
}
// Collect the response
var textParts []string
var toolCalls []ContentBlock
+ var currentToolID string
+ var currentToolName string
var currentToolInput string
for event := range ch {
@@ -140,10 +162,33 @@ func (s *AIService) messageLoop(conversationId string) error {
case "text_delta":
textParts = append(textParts, event.Data)
case "tool_use_start":
+ // Finalize any previous tool call
+ if currentToolID != "" {
+ toolCalls = append(toolCalls, ContentBlock{
+ Type: "tool_use",
+ ID: currentToolID,
+ Name: currentToolName,
+ Input: json.RawMessage(currentToolInput),
+ })
+ }
+ currentToolID = event.ToolID
+ currentToolName = event.ToolName
currentToolInput = ""
case "tool_use_delta":
currentToolInput += event.Data
case "done":
+ // Finalize any in-progress tool call
+ if currentToolID != "" {
+ toolCalls = append(toolCalls, ContentBlock{
+ Type: "tool_use",
+ ID: currentToolID,
+ Name: currentToolName,
+ Input: json.RawMessage(currentToolInput),
+ })
+ currentToolID = ""
+ currentToolName = ""
+ currentToolInput = ""
+ }
// Parse usage if available
var delta struct {
Usage Usage `json:"usage"`
@@ -152,34 +197,39 @@ func (s *AIService) messageLoop(conversationId string) error {
s.conversations.UpdateTokenUsage(conversationId, delta.Usage.InputTokens, delta.Usage.OutputTokens)
}
case "error":
- return fmt.Errorf("stream error: %s", event.Data)
+ return nil, fmt.Errorf("stream error: %s", event.Data)
}
+ }
- // When a tool_use block completes, we need to check content_block_stop
- // But since we accumulate, we'll finalize after the stream ends
- _ = toolCalls // kept for the final assembly below
+ // Finalize any trailing tool call (if stream ended without a "done" event)
+ if currentToolID != "" {
+ toolCalls = append(toolCalls, ContentBlock{
+ Type: "tool_use",
+ ID: currentToolID,
+ Name: currentToolName,
+ Input: json.RawMessage(currentToolInput),
+ })
}
// Build the assistant message
var assistantContent []ContentBlock
- if len(textParts) > 0 {
- fullText := ""
- for _, p := range textParts {
- fullText += p
- }
- if fullText != "" {
- assistantContent = append(assistantContent, ContentBlock{
- Type: "text",
- Text: fullText,
- })
- }
+ fullText := ""
+ for _, p := range textParts {
+ fullText += p
+ }
+ if fullText != "" {
+ assistantContent = append(assistantContent, ContentBlock{
+ Type: "text",
+ Text: fullText,
+ })
+ resp.Text += fullText
}
for _, tc := range toolCalls {
assistantContent = append(assistantContent, tc)
}
if len(assistantContent) == 0 {
- return nil // empty response
+ return resp, nil // empty response
}
// Store assistant message
@@ -188,7 +238,7 @@ func (s *AIService) messageLoop(conversationId string) error {
Content: assistantContent,
}
if err := s.conversations.AddMessage(conversationId, assistantMsg); err != nil {
- return fmt.Errorf("store assistant message: %w", err)
+ return nil, fmt.Errorf("store assistant message: %w", err)
}
// If there were tool calls, dispatch them and continue the loop
@@ -201,7 +251,7 @@ func (s *AIService) messageLoop(conversationId string) error {
}
if !hasToolUse {
- return nil // done, no tool use to process
+ return resp, nil // done, no tool use to process
}
// Dispatch tool calls and create tool_result message
@@ -211,24 +261,32 @@ func (s *AIService) messageLoop(conversationId string) error {
continue
}
- result, err := s.router.Dispatch(block.Name, block.Input)
+ result, dispatchErr := s.router.Dispatch(block.Name, block.Input)
resultBlock := ContentBlock{
Type: "tool_result",
ToolUseID: block.ID,
}
- if err != nil {
+ tcResult := ToolCallResult{
+ Name: block.Name,
+ Input: block.Input,
+ }
+
+ if dispatchErr != nil {
resultBlock.IsError = true
resultBlock.Content = []ContentBlock{
- {Type: "text", Text: err.Error()},
+ {Type: "text", Text: dispatchErr.Error()},
}
+ tcResult.Error = dispatchErr.Error()
} else {
resultJSON, _ := json.Marshal(result)
resultBlock.Content = []ContentBlock{
{Type: "text", Text: string(resultJSON)},
}
+ tcResult.Result = result
}
toolResults = append(toolResults, resultBlock)
+ resp.ToolCalls = append(resp.ToolCalls, tcResult)
}
toolResultMsg := Message{
@@ -236,13 +294,25 @@ func (s *AIService) messageLoop(conversationId string) error {
Content: toolResults,
}
if err := s.conversations.AddMessage(conversationId, toolResultMsg); err != nil {
- return fmt.Errorf("store tool results: %w", err)
+ return nil, fmt.Errorf("store tool results: %w", err)
}
// Continue the loop to let Claude process tool results
}
- return fmt.Errorf("exceeded maximum tool use iterations")
+ return nil, fmt.Errorf("exceeded maximum tool use iterations")
+}
+
+// --- Model ---
+
+// GetModel returns the current Claude model identifier.
+func (s *AIService) GetModel() string {
+ return s.client.model
+}
+
+// SetModel changes the Claude model used for subsequent requests.
+func (s *AIService) SetModel(model string) {
+ s.client.model = model
}
// --- Terminal Buffer Management ---