Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
612 changes: 612 additions & 0 deletions aiprompts/conn-arch.md

Large diffs are not rendered by default.

1,007 changes: 1,007 additions & 0 deletions aiprompts/fe-conn-arch.md

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions frontend/app/aipanel/aipanel.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,7 @@ import { waveAIHasSelection } from "@/app/aipanel/waveai-focus-utils";
import { ErrorBoundary } from "@/app/element/errorboundary";
import { atoms, getSettingsKeyAtom } from "@/app/store/global";
import { globalStore } from "@/app/store/jotaiStore";
import { useTabModel } from "@/app/store/tab-model";
import { maybeUseTabModel } from "@/app/store/tab-model";
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🔴 Critical

🧩 Analysis chain

🏁 Script executed:

find . -name "*.ts" -o -name "*.tsx" | head -20

Repository: wavetermdev/waveterm

Length of output: 618


🏁 Script executed:

rg "maybeUseTabModel|useTabModel" --type=tsx --type=ts -B 2 -A 5

Repository: wavetermdev/waveterm

Length of output: 91


🏁 Script executed:

fd aipanel.tsx

Repository: wavetermdev/waveterm

Length of output: 96


🏁 Script executed:

cat -n frontend/app/aipanel/aipanel.tsx | head -100

Repository: wavetermdev/waveterm

Length of output: 4887


🏁 Script executed:

cat -n frontend/app/aipanel/aipanel.tsx | sed -n '250,290p'

Repository: wavetermdev/waveterm

Length of output: 2567


🏁 Script executed:

rg "export function maybeUseTabModel|function maybeUseTabModel" -A 8 -B 2

Repository: wavetermdev/waveterm

Length of output: 315


🏁 Script executed:

rg "maybeUseTabModel" -g "*.ts" -g "*.tsx" -A 3 -B 2

Repository: wavetermdev/waveterm

Length of output: 1442


🏁 Script executed:

cat -n frontend/app/store/tab-model.ts | head -50

Repository: wavetermdev/waveterm

Length of output: 2011


🏁 Script executed:

rg "TabModelContext" -B 5 -A 3 frontend/app/store/tab-model.ts

Repository: wavetermdev/waveterm

Length of output: 526


Fix return type annotation and add null-safety check for tabModel.

The maybeUseTabModel() function has an incorrect return type annotation. It's declared to return TabModel (line 9 import), but the actual implementation returns useContext(TabModelContext) where the context is typed as TabModel | undefined with an undefined default value.

This creates a type safety issue at line 282:

body.tabid = tabModel.tabId;

If the component is used outside a TabModelProvider, tabModel will be undefined at runtime, causing a crash.

Required fixes:

  1. Update maybeUseTabModel() return type to TabModel | undefined (or use useTabModel() which properly throws)
  2. Add a null check before accessing tabModel.tabId on line 282, or use optional chaining: body.tabid = tabModel?.tabId;
🤖 Prompt for AI Agents
In frontend/app/aipanel/aipanel.tsx around lines 9 and 282: the imported
maybeUseTabModel() is typed as returning TabModel but its implementation
actually returns TabModel | undefined, which lets tabModel be undefined and
causes a runtime crash when accessing tabModel.tabId; update the
maybeUseTabModel() signature in its module to return TabModel | undefined (or
switch callers to use useTabModel() which throws when missing), and in this file
add a null-safety check before using tabModel on line 282 — either guard with if
(tabModel) { body.tabid = tabModel.tabId } or use optional chaining body.tabid =
tabModel?.tabId so you never dereference undefined.

import { checkKeyPressed, keydownWrapper } from "@/util/keyutil";
import { isMacOS, isWindows } from "@/util/platformutil";
import { cn } from "@/util/util";
Expand Down Expand Up @@ -255,7 +255,7 @@ const AIPanelComponentInner = memo(() => {
const isFocused = jotai.useAtomValue(model.isWaveAIFocusedAtom);
const telemetryEnabled = jotai.useAtomValue(getSettingsKeyAtom("telemetry:enabled")) ?? false;
const isPanelVisible = jotai.useAtomValue(model.getPanelVisibleAtom());
const tabModel = useTabModel();
const tabModel = maybeUseTabModel();
const defaultMode = jotai.useAtomValue(getSettingsKeyAtom("waveai:defaultmode")) ?? "waveai@balanced";
const aiModeConfigs = jotai.useAtomValue(model.aiModeConfigs);

Expand Down
45 changes: 28 additions & 17 deletions frontend/app/aipanel/aipanelinput.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -169,24 +169,35 @@ export const AIPanelInput = memo(({ onSubmit, status, model }: AIPanelInputProps
<i className="fa fa-paperclip text-sm"></i>
</button>
</Tooltip>
<Tooltip content="Send message (Enter)" placement="top" divClassName="absolute bottom-1.5 right-1">
<button
type="submit"
disabled={status !== "ready" || !input.trim()}
className={cn(
"w-5 h-5 transition-colors flex items-center justify-center",
status !== "ready" || !input.trim()
? "text-gray-400"
: "text-accent/80 hover:text-accent cursor-pointer"
)}
>
{status === "streaming" ? (
<i className="fa fa-spinner fa-spin text-sm"></i>
) : (
{status === "streaming" ? (
<Tooltip content="Stop Response" placement="top" divClassName="absolute bottom-1.5 right-1">
<button
type="button"
onClick={() => model.stopResponse()}
className={cn(
"w-5 h-5 transition-colors flex items-center justify-center",
"text-green-500 hover:text-green-400 cursor-pointer"
)}
>
<i className="fa fa-square text-sm"></i>
</button>
</Tooltip>
) : (
<Tooltip content="Send message (Enter)" placement="top" divClassName="absolute bottom-1.5 right-1">
<button
type="submit"
disabled={status !== "ready" || !input.trim()}
className={cn(
"w-5 h-5 transition-colors flex items-center justify-center",
status !== "ready" || !input.trim()
? "text-gray-400"
: "text-accent/80 hover:text-accent cursor-pointer"
)}
>
<i className="fa fa-paper-plane text-sm"></i>
)}
</button>
</Tooltip>
</button>
</Tooltip>
)}
</div>
</form>
</div>
Expand Down
40 changes: 30 additions & 10 deletions frontend/app/aipanel/waveai-model.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -42,12 +42,12 @@ export interface DroppedFile {

export class WaveAIModel {
private static instance: WaveAIModel | null = null;
private inputRef: React.RefObject<AIPanelInputRef> | null = null;
private scrollToBottomCallback: (() => void) | null = null;
private useChatSendMessage: UseChatSendMessageType | null = null;
private useChatSetMessages: UseChatSetMessagesType | null = null;
private useChatStatus: ChatStatus = "ready";
private useChatStop: (() => void) | null = null;
inputRef: React.RefObject<AIPanelInputRef> | null = null;
scrollToBottomCallback: (() => void) | null = null;
useChatSendMessage: UseChatSendMessageType | null = null;
useChatSetMessages: UseChatSetMessagesType | null = null;
useChatStatus: ChatStatus = "ready";
useChatStop: (() => void) | null = null;
// Used for injecting Wave-specific message data into DefaultChatTransport's prepareSendMessagesRequest
realMessage: AIMessage | null = null;
orefContext: ORef;
Expand Down Expand Up @@ -324,6 +324,29 @@ export class WaveAIModel {
}
}

async reloadChatFromBackend(chatIdValue: string): Promise<WaveUIMessage[]> {
    // Re-fetch the persisted chat from the backend and keep the
    // empty-chat atom in sync with what was actually stored.
    const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
    const loaded: UIMessage[] = chatData?.messages ?? [];
    globalStore.set(this.isChatEmptyAtom, loaded.length === 0);
    // Safe cast: the RPC message type and the FE WaveUIMessage type are structurally compatible.
    return loaded as WaveUIMessage[];
}

async stopResponse() {
    // Abort the in-flight useChat stream, if one is registered.
    this.useChatStop?.();
    // Brief grace period so the backend can persist the partial response
    // before we re-read the chat state.
    await new Promise((resolve) => setTimeout(resolve, 500));

    const chatIdValue = globalStore.get(this.chatId);
    if (!chatIdValue) {
        return;
    }
    try {
        // Replace the optimistic client-side messages with the
        // authoritative state the backend actually stored.
        const reloaded = await this.reloadChatFromBackend(chatIdValue);
        this.useChatSetMessages?.(reloaded);
    } catch (error) {
        console.error("Failed to reload chat after stop:", error);
    }
}

getAndClearMessage(): AIMessage | null {
const msg = this.realMessage;
this.realMessage = null;
Expand Down Expand Up @@ -448,10 +471,7 @@ export class WaveAIModel {
}

try {
const chatData = await RpcApi.GetWaveAIChatCommand(TabRpcClient, { chatid: chatIdValue });
const messages: UIMessage[] = chatData?.messages ?? [];
globalStore.set(this.isChatEmptyAtom, messages.length === 0);
return messages as WaveUIMessage[]; // this is safe just different RPC type vs the FE type, but they are compatible
return await this.reloadChatFromBackend(chatIdValue);
} catch (error) {
console.error("Failed to load chat:", error);
this.setError("Failed to load chat. Starting new chat...");
Expand Down
6 changes: 5 additions & 1 deletion frontend/app/app.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -3,6 +3,7 @@

import { ClientModel } from "@/app/store/client-model";
import { GlobalModel } from "@/app/store/global-model";
import { getTabModelByTabId, TabModelContext } from "@/app/store/tab-model";
import { Workspace } from "@/app/workspace/workspace";
import { ContextMenuModel } from "@/store/contextmenu";
import { atoms, createBlock, getSettingsPrefixAtom, globalStore, isDev, removeFlashError } from "@/store/global";
Expand Down Expand Up @@ -31,12 +32,15 @@ const dlog = debug("wave:app");
const focusLog = debug("wave:focus");

const App = ({ onFirstRender }: { onFirstRender: () => void }) => {
const tabId = useAtomValue(atoms.staticTabId);
useEffect(() => {
onFirstRender();
}, []);
return (
<Provider store={globalStore}>
<AppInner />
<TabModelContext.Provider value={getTabModelByTabId(tabId)}>
<AppInner />
</TabModelContext.Provider>
</Provider>
);
};
Expand Down
16 changes: 9 additions & 7 deletions frontend/app/store/tab-model.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@ import { globalStore } from "./jotaiStore";
import * as WOS from "./wos";

const tabModelCache = new Map<string, TabModel>();
const activeTabIdAtom = atom<string>(null) as PrimitiveAtom<string>;
export const activeTabIdAtom = atom<string>(null) as PrimitiveAtom<string>;

class TabModel {
export class TabModel {
tabId: string;
tabAtom: Atom<Tab>;
tabNumBlocksAtom: Atom<number>;
Expand Down Expand Up @@ -40,7 +40,7 @@ class TabModel {
}
}

function getTabModelByTabId(tabId: string): TabModel {
export function getTabModelByTabId(tabId: string): TabModel {
let model = tabModelCache.get(tabId);
if (model == null) {
model = new TabModel(tabId);
Expand All @@ -49,22 +49,24 @@ function getTabModelByTabId(tabId: string): TabModel {
return model;
}

function getActiveTabModel(): TabModel | null {
export function getActiveTabModel(): TabModel | null {
const activeTabId = globalStore.get(activeTabIdAtom);
if (activeTabId == null) {
return null;
}
return getTabModelByTabId(activeTabId);
}

const TabModelContext = createContext<TabModel | undefined>(undefined);
export const TabModelContext = createContext<TabModel | undefined>(undefined);

function useTabModel(): TabModel {
export function useTabModel(): TabModel {
const model = useContext(TabModelContext);
if (model == null) {
throw new Error("useTabModel must be used within a TabModelProvider");
}
return model;
}

export { activeTabIdAtom, getActiveTabModel, getTabModelByTabId, TabModel, TabModelContext, useTabModel };
// React hook: reads the TabModel from context without throwing.
// The context default is undefined (see TabModelContext), so callers
// rendered outside a TabModelContext.Provider receive undefined and
// must handle it (e.g. with optional chaining). The return type
// reflects that — annotating it as plain TabModel would let callers
// dereference an undefined value without a compile error.
export function maybeUseTabModel(): TabModel | undefined {
    return useContext(TabModelContext);
}
Comment on lines +70 to +72
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

⚠️ Potential issue | 🟡 Minor

Return type should include undefined.

useContext(TabModelContext) returns TabModel | undefined (since the context default is undefined), but maybeUseTabModel declares return type as TabModel. This can cause type errors for callers who correctly expect the function might return undefined.

🔎 Proposed fix
-export function maybeUseTabModel(): TabModel {
+export function maybeUseTabModel(): TabModel | undefined {
     return useContext(TabModelContext);
 }
📝 Committable suggestion

‼️ IMPORTANT
Carefully review the code before committing. Ensure that it accurately replaces the highlighted code, contains no missing lines, and has no issues with indentation. Thoroughly test & benchmark the code to ensure it meets the requirements.

Suggested change
export function maybeUseTabModel(): TabModel {
return useContext(TabModelContext);
}
export function maybeUseTabModel(): TabModel | undefined {
return useContext(TabModelContext);
}
🤖 Prompt for AI Agents
In frontend/app/store/tab-model.ts around lines 70 to 72, the function
maybeUseTabModel currently declares a return type of TabModel but calls
useContext(TabModelContext) which can return TabModel | undefined; update the
function signature to return TabModel | undefined and propagate that change to
any callers (or add non-null assertions only where safe) so TypeScript reflects
the possible undefined value from the context.

7 changes: 2 additions & 5 deletions frontend/app/workspace/workspace.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,6 @@ import { CenteredDiv } from "@/app/element/quickelems";
import { ModalsRenderer } from "@/app/modals/modalsrenderer";
import { TabBar } from "@/app/tab/tabbar";
import { TabContent } from "@/app/tab/tabcontent";
import { getTabModelByTabId, TabModelContext } from "@/app/store/tab-model";
import { Widgets } from "@/app/workspace/widgets";
import { WorkspaceLayoutModel } from "@/app/workspace/workspace-layout-model";
import { atoms, getApi } from "@/store/global";
Expand Down Expand Up @@ -70,7 +69,7 @@ const WorkspaceElem = memo(() => {
className="overflow-hidden"
>
<div ref={aiPanelWrapperRef} className="w-full h-full">
<AIPanel />
{tabId !== "" && <AIPanel />}
</div>
</Panel>
<PanelResizeHandle className="w-0.5 bg-transparent hover:bg-zinc-500/20 transition-colors" />
Expand All @@ -79,9 +78,7 @@ const WorkspaceElem = memo(() => {
<CenteredDiv>No Active Tab</CenteredDiv>
) : (
<div className="flex flex-row h-full">
<TabModelContext.Provider value={getTabModelByTabId(tabId)}>
<TabContent key={tabId} tabId={tabId} />
</TabModelContext.Provider>
<TabContent key={tabId} tabId={tabId} />
<Widgets />
</div>
)}
Expand Down
18 changes: 18 additions & 0 deletions pkg/aiusechat/chatstore/chatstore.go
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@ package chatstore

import (
"fmt"
"slices"
"sync"

"github.com/wavetermdev/waveterm/pkg/aiusechat/uctypes"
Expand Down Expand Up @@ -109,3 +110,20 @@ func (cs *ChatStore) PostMessage(chatId string, aiOpts *uctypes.AIOptsType, mess

return nil
}

// RemoveMessage deletes every native message whose id matches messageId
// from the given chat. It reports whether anything was removed; a missing
// chat yields false.
func (cs *ChatStore) RemoveMessage(chatId string, messageId string) bool {
	cs.lock.Lock()
	defer cs.lock.Unlock()

	chat, ok := cs.chats[chatId]
	if !ok || chat == nil {
		return false
	}

	before := len(chat.NativeMessages)
	matches := func(msg uctypes.GenAIMessage) bool {
		return msg.GetMessageId() == messageId
	}
	chat.NativeMessages = slices.DeleteFunc(chat.NativeMessages, matches)

	// Length shrank iff at least one message matched.
	return len(chat.NativeMessages) != before
}
63 changes: 24 additions & 39 deletions pkg/aiusechat/gemini/gemini-backend.go
Original file line number Diff line number Diff line change
Expand Up @@ -42,45 +42,6 @@ func ensureAltSse(endpoint string) (string, error) {
return endpoint, nil
}

// UpdateToolUseData updates the tool use data for a specific tool call in the chat
func UpdateToolUseData(chatId string, toolCallId string, toolUseData uctypes.UIMessageDataToolUse) error {
chat := chatstore.DefaultChatStore.Get(chatId)
if chat == nil {
return fmt.Errorf("chat not found: %s", chatId)
}

for _, genMsg := range chat.NativeMessages {
chatMsg, ok := genMsg.(*GeminiChatMessage)
if !ok {
continue
}

for i, part := range chatMsg.Parts {
if part.FunctionCall != nil && part.ToolUseData != nil && part.ToolUseData.ToolCallId == toolCallId {
// Update the message with new tool use data
updatedMsg := &GeminiChatMessage{
MessageId: chatMsg.MessageId,
Role: chatMsg.Role,
Parts: make([]GeminiMessagePart, len(chatMsg.Parts)),
Usage: chatMsg.Usage,
}
copy(updatedMsg.Parts, chatMsg.Parts)
updatedMsg.Parts[i].ToolUseData = &toolUseData

aiOpts := &uctypes.AIOptsType{
APIType: chat.APIType,
Model: chat.Model,
APIVersion: chat.APIVersion,
}

return chatstore.DefaultChatStore.PostMessage(chatId, aiOpts, updatedMsg)
}
}
}

return fmt.Errorf("tool call with ID %s not found in chat %s", toolCallId, chatId)
}

// appendPartToLastUserMessage appends a text part to the last user message in the contents slice
func appendPartToLastUserMessage(contents []GeminiContent, text string) {
for i := len(contents) - 1; i >= 0; i-- {
Expand Down Expand Up @@ -347,6 +308,14 @@ func processGeminiStream(
if errors.Is(err, io.EOF) {
break
}
if sseHandler.Err() != nil {
partialMsg := extractPartialGeminiMessage(msgID, textBuilder.String())
return &uctypes.WaveStopReason{
Kind: uctypes.StopKindCanceled,
ErrorType: "client_disconnect",
ErrorText: "client disconnected",
}, partialMsg, nil
}
_ = sseHandler.AiMsgError(fmt.Sprintf("stream decode error: %v", err))
return &uctypes.WaveStopReason{
Kind: uctypes.StopKindError,
Expand Down Expand Up @@ -512,3 +481,19 @@ func processGeminiStream(

return stopReason, assistantMsg, nil
}

// extractPartialGeminiMessage wraps the assistant text accumulated before a
// stream was interrupted into a single-part GeminiChatMessage. It returns
// nil when no text was streamed, so callers can skip persisting an empty
// message.
func extractPartialGeminiMessage(msgID string, text string) *GeminiChatMessage {
	if len(text) == 0 {
		return nil
	}
	part := GeminiMessagePart{Text: text}
	return &GeminiChatMessage{
		MessageId: msgID,
		Role:      "model",
		Parts:     []GeminiMessagePart{part},
	}
}
Loading
Loading