Skip to content

Commit 7b37dc0

Browse files
committed
feat: enhance callModel to accept chat-style messages
- Add support for chat-style Message[] input in addition to responses-style input
- Automatically convert chat-style messages to responses format
- Handle system, user, assistant, and tool messages
- Add tests for chat-style message support
1 parent 856fd27 commit 7b37dc0

File tree

3 files changed

+148
-3
lines changed

3 files changed

+148
-3
lines changed

src/funcs/callModel.ts

Lines changed: 76 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -5,6 +5,69 @@ import * as models from "../models/index.js";
55
import { EnhancedTool, MaxToolRounds } from "../lib/tool-types.js";
66
import { convertEnhancedToolsToAPIFormat } from "../lib/tool-executor.js";
77

8+
/**
 * Input type that accepts both chat-style messages and responses-style input.
 *
 * Callers may pass either the native responses-style input
 * (`models.OpenResponsesInput`) or an array of chat-style messages
 * (`models.Message[]`); chat-style input is converted internally before the
 * request is sent.
 */
export type CallModelInput =
  | models.OpenResponsesInput
  | models.Message[];
14+
15+
/**
16+
* Check if input is chat-style messages (Message[])
17+
*/
18+
function isChatStyleMessages(input: CallModelInput): input is models.Message[] {
19+
if (!Array.isArray(input)) return false;
20+
if (input.length === 0) return false;
21+
22+
const first = input[0] as any;
23+
// Chat-style messages have role but no 'type' field at top level
24+
// Responses-style items have 'type' field (like 'message', 'function_call', etc.)
25+
return first && 'role' in first && !('type' in first);
26+
}
27+
28+
/**
29+
* Convert chat-style messages to responses-style input
30+
*/
31+
function convertChatToResponsesInput(messages: models.Message[]): models.OpenResponsesInput {
32+
return messages.map((msg): models.OpenResponsesEasyInputMessage | models.OpenResponsesFunctionCallOutput => {
33+
if (msg.role === "tool") {
34+
const toolMsg = msg as models.ToolResponseMessage;
35+
return {
36+
type: "function_call_output",
37+
callId: toolMsg.toolCallId,
38+
output: typeof toolMsg.content === "string" ? toolMsg.content : JSON.stringify(toolMsg.content),
39+
} as models.OpenResponsesFunctionCallOutput;
40+
}
41+
42+
// Handle assistant messages with tool calls
43+
if (msg.role === "assistant") {
44+
const assistantMsg = msg as models.AssistantMessage;
45+
// If it has tool calls, we need to convert them
46+
// For now, just convert the content part
47+
return {
48+
role: "assistant",
49+
content: typeof assistantMsg.content === "string"
50+
? assistantMsg.content
51+
: assistantMsg.content === null
52+
? ""
53+
: JSON.stringify(assistantMsg.content),
54+
} as models.OpenResponsesEasyInputMessage;
55+
}
56+
57+
// System, user, developer messages
58+
const content = typeof msg.content === "string"
59+
? msg.content
60+
: msg.content === null || msg.content === undefined
61+
? ""
62+
: JSON.stringify(msg.content);
63+
64+
return {
65+
role: msg.role as "user" | "system" | "developer",
66+
content,
67+
} as models.OpenResponsesEasyInputMessage;
68+
}) as models.OpenResponsesInput;
69+
}
70+
871
/**
972
* Get a response with multiple consumption patterns
1073
*
@@ -75,13 +138,24 @@ import { convertEnhancedToolsToAPIFormat } from "../lib/tool-executor.js";
75138
*/
76139
export function callModel(
77140
client: OpenRouterCore,
78-
request: Omit<models.OpenResponsesRequest, "stream" | "tools"> & {
141+
request: Omit<models.OpenResponsesRequest, "stream" | "tools" | "input"> & {
142+
input?: CallModelInput;
79143
tools?: EnhancedTool[] | models.OpenResponsesRequest["tools"];
80144
maxToolRounds?: MaxToolRounds;
81145
},
82146
options?: RequestOptions,
83147
): ResponseWrapper {
84-
const { tools, maxToolRounds, ...apiRequest } = request;
148+
const { tools, maxToolRounds, input, ...restRequest } = request;
149+
150+
// Convert chat-style messages to responses-style input if needed
151+
const convertedInput = input && isChatStyleMessages(input)
152+
? convertChatToResponsesInput(input)
153+
: input;
154+
155+
const apiRequest = {
156+
...restRequest,
157+
input: convertedInput,
158+
};
85159

86160
// Separate enhanced tools from API tools
87161
let isEnhancedTools = false;

src/sdk/sdk.ts

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -94,7 +94,8 @@ export class OpenRouter extends ClientSDK {
9494

9595
// #region sdk-class-body
9696
callModel(
97-
request: Omit<models.OpenResponsesRequest, "stream" | "tools"> & {
97+
request: Omit<models.OpenResponsesRequest, "stream" | "tools" | "input"> & {
98+
input?: import("../funcs/callModel.js").CallModelInput;
9899
tools?: EnhancedTool[] | models.OpenResponsesRequest["tools"];
99100
maxToolRounds?: MaxToolRounds;
100101
},

tests/e2e/callModel.test.ts

Lines changed: 70 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,76 @@ describe("callModel E2E Tests", () => {
2121
});
2222
});
2323

24+
describe("Chat-style messages support", () => {
25+
it("should accept chat-style Message array as input", async () => {
26+
const response = client.callModel({
27+
model: "meta-llama/llama-3.2-1b-instruct",
28+
input: [
29+
{
30+
role: "system",
31+
content: "You are a helpful assistant.",
32+
},
33+
{
34+
role: "user",
35+
content: "Say 'chat test' and nothing else.",
36+
},
37+
] as Message[],
38+
});
39+
40+
const text = await response.getText();
41+
42+
expect(text).toBeDefined();
43+
expect(typeof text).toBe("string");
44+
expect(text.length).toBeGreaterThan(0);
45+
});
46+
47+
it("should handle multi-turn chat-style conversation", async () => {
48+
const response = client.callModel({
49+
model: "meta-llama/llama-3.2-1b-instruct",
50+
input: [
51+
{
52+
role: "user",
53+
content: "My favorite color is blue.",
54+
},
55+
{
56+
role: "assistant",
57+
content: "That's nice! Blue is a calming color.",
58+
},
59+
{
60+
role: "user",
61+
content: "What is my favorite color?",
62+
},
63+
] as Message[],
64+
});
65+
66+
const text = await response.getText();
67+
68+
expect(text).toBeDefined();
69+
expect(text.toLowerCase()).toContain("blue");
70+
});
71+
72+
it("should handle system message in chat-style input", async () => {
73+
const response = client.callModel({
74+
model: "meta-llama/llama-3.2-1b-instruct",
75+
input: [
76+
{
77+
role: "system",
78+
content: "Always respond with exactly one word.",
79+
},
80+
{
81+
role: "user",
82+
content: "Say hello.",
83+
},
84+
] as Message[],
85+
});
86+
87+
const text = await response.getText();
88+
89+
expect(text).toBeDefined();
90+
expect(typeof text).toBe("string");
91+
});
92+
});
93+
2494
describe("response.text - Text extraction", () => {
2595
it("should successfully get text from a response", async () => {
2696
const response = client.callModel({

0 commit comments

Comments
 (0)