AI Agent extension API reference
Configuration options
/**
 * Context object passed to every AI Agent event handler.
 */
export interface AiAgentEventContext {
/** The editor instance the AI Agent is attached to. */
editor: Editor
/** The AI Agent provider that emitted the event. */
provider: AiAgentProvider
}
/**
 * Signatures of the event handlers that can be registered on the AI Agent
 * provider. Each handler receives an {@link AiAgentEventContext} as its
 * final argument.
 */
export type EventHandlers = {
/** Called when an error occurs while the AI Agent is loading a response. */
loadingError: (error: unknown, context: AiAgentEventContext) => void
/** Called whenever the provider state changes; receives the new and previous state. */
stateChange: (
state: AiAgentProviderState,
previousState: AiAgentProviderState,
context: AiAgentEventContext,
) => void
/** Called before a tool call is executed. */
beforeToolCall: (toolCall: AiAgentToolCall, context: AiAgentEventContext) => void
/** Called after a tool call has been executed. */
afterToolCall: (toolCall: AiAgentToolCall, context: AiAgentEventContext) => void
/** Called when an AI Agent run has finished. */
runFinished: (context: AiAgentEventContext) => void
}
/**
* Configuration options for the AI Agent provider.
*/
/**
 * Configuration options for the AI Agent provider.
 */
export interface AiAgentProviderOptions extends Omit<BaseAiAgentProviderOptions, 'onStateChange'> {
/**
 * Application ID for authentication with the AI service.
 */
appId: string
/**
 * Authentication token for the AI service.
 */
token: string
/**
 * Base URL of the AI service API.
 */
baseUrl: string
/**
 * Initial chat messages of the conversation with the AI Agent.
 * @default []
 * @example
 * ```ts
 * [
 * {
 * type: 'ai',
 * text: "Hello, I'm an AI assistant that can edit your document. How can I help you?",
 * },
 * ]
 * ```
 */
initialChatMessages: ChatMessage[]
/**
 * Controls whether the AI agent should automatically accept tool calls that modify the editor content.
 * - "always": Automatically accepts all tool calls.
 * - "never": Never automatically accepts tool calls, requiring manual review.
 * - "onlyRead": Automatically accepts tool calls that do not modify the editor content.
 * @default "onlyRead"
 */
autoAccept: 'always' | 'never' | 'onlyRead'
/**
 * Controls whether checkpoints are automatically saved when user sends a message or accepts a tool call.
 * If false, the setCheckpoint function is not called.
 * @default false
 */
autoSaveCheckpoints?: boolean
/**
 * The OpenAI model to use for the AI Agent.
 * Recommended models are gpt-4.1 and gpt-4o as they work best with the AI Agent.
 * The model needs to support tool calling.
 * @default "gpt-4.1"
 */
modelName?: AiAgentModelName
/**
 * Handler called when an error occurs while loading an AI response.
 * @see EventHandlers
 */
onLoadingError: EventHandlers['loadingError']
/**
 * Handler called whenever the provider state changes.
 * Replaces the `onStateChange` option omitted from {@link BaseAiAgentProviderOptions}.
 * @see EventHandlers
 */
onStateChange: EventHandlers['stateChange']
/**
 * Handler called before a tool call is executed.
 * @see EventHandlers
 */
onBeforeToolCall: EventHandlers['beforeToolCall']
/**
 * Handler called after a tool call has been executed.
 * @see EventHandlers
 */
onAfterToolCall: EventHandlers['afterToolCall']
/**
 * Handler called when an AI Agent run has finished.
 * @see EventHandlers
 */
onRunFinished: EventHandlers['runFinished']
/**
 * Converts the LLM message to a format that can be parsed by the Tiptap AI Agent extension.
 * @default openaiChatCompletionsAdapter
 */
adapter: AiAgentAdapter<unknown, unknown, unknown, unknown>
/**
 * Whether the AI Agent should use the AI Changes extension.
 * @default true
 */
useAiChangesExtension: boolean
/**
 * Function to resolve the AI Agent request. Lets you integrate the AI Agent with your own backend service.
 * @param options The options for the AI Agent request, including the chat messages, tool handlers, and model name.
 * @returns The chat messages and tool call messages from the AI service
 * @default defaultAiAgentResolver
 */
resolver: (options: AiAgentResolverOptions) => Promise<AiAgentResolverReturn>
/**
 * Converts the chat messages of the AI Agent conversation to a format that can be sent to the LLM.
 *
 * @param messages The chat messages of the AI Agent conversation
 * @param adapter The adapter to use for converting the messages
 * @returns The LLM messages in the format expected by the LLM provider
 */
createLlmMessages: (
messages: ChatMessage[],
adapter: AiAgentAdapter<unknown, unknown, unknown, unknown>,
) => unknown[]
}
/**
* Internal state of the AI Agent provider.
*/
/**
 * Internal state of the AI Agent provider.
 */
export interface AiAgentProviderState {
/**
 * The current status of the AI agent. It determines what action to take next.
 *
 * The AI Agent works like a finite state machine that can transition from one
 * state to another.
 *
 * @see AiAgentStatus
 */
status: AiAgentStatus
/**
 * The error raised during the last loading attempt, if any.
 * Typed `unknown` because anything can be thrown — narrow before use.
 */
loadingError: unknown
/**
 * The chat messages in the conversation.
 */
chatMessages: ChatMessage[]
/**
 * Messages held back until the pending tool calls are reviewed:
 * `accept` holds the messages applied if the user accepts, `reject`
 * those applied if the user rejects.
 * NOTE(review): exact accept/reject semantics inferred from field names — confirm against provider implementation.
 */
chatMessagesPendingReview: {
accept: ChatMessage[]
reject: ChatMessage[]
}
/**
 * Controls the automatic acceptance behavior of the AI agent.
 * - "always": Automatically accepts all tool calls.
 * - "never": Never automatically accepts tool calls, requiring manual review.
 * - "onlyRead": Automatically accepts tool calls that do not modify the editor content.
 */
autoAccept: 'always' | 'never' | 'onlyRead'
}