
Commit 25f3350

ai/core: expose raw response headers (#1417)

1 parent d6431ae · commit 25f3350

27 files changed: +543 -133 lines

.changeset/short-seas-flash.md

Lines changed: 11 additions & 0 deletions
```diff
@@ -0,0 +1,11 @@
+---
+'@ai-sdk/provider-utils': patch
+'@ai-sdk/anthropic': patch
+'@ai-sdk/provider': patch
+'@ai-sdk/mistral': patch
+'@ai-sdk/google': patch
+'@ai-sdk/openai': patch
+'ai': patch
+---
+
+ai/core: add support for getting raw response headers.
```
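Every result type touched by this commit gains the same optional field. A minimal sketch of the added shape (illustrative only; the real declarations are inlined in each result class rather than shared as a named interface):

```ts
// Shape added to generate/stream results across the patched packages.
// Both levels are optional: a provider may not expose HTTP metadata at all.
interface RawResponse {
  headers?: Record<string, string>;
}
```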
Lines changed: 24 additions & 0 deletions
```diff
@@ -0,0 +1,24 @@
+import { openai } from '@ai-sdk/openai';
+import { experimental_streamText } from 'ai';
+import dotenv from 'dotenv';
+
+dotenv.config();
+
+async function main() {
+  const result = await experimental_streamText({
+    model: openai('gpt-3.5-turbo'),
+    maxTokens: 512,
+    temperature: 0.3,
+    maxRetries: 5,
+    prompt: 'Invent a new holiday and describe its traditions.',
+  });
+
+  console.log(`Request ID: ${result.rawResponse?.headers?.['x-request-id']}`);
+  console.log();
+
+  for await (const textPart of result.textStream) {
+    process.stdout.write(textPart);
+  }
+}
+
+main().catch(console.error);
```
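The example reads `x-request-id`, a header OpenAI includes on its API responses. Because the headers come from the raw HTTP response rather than the stream, they are available immediately, before the first text delta is consumed.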

packages/anthropic/src/anthropic-messages-language-model.test.ts

Lines changed: 47 additions & 4 deletions
```diff
@@ -11,10 +11,7 @@ const TEST_PROMPT: LanguageModelV1Prompt = [
   { role: 'user', content: [{ type: 'text', text: 'Hello' }] },
 ];
 
-const provider = createAnthropic({
-  apiKey: 'test-api-key',
-});
-
+const provider = createAnthropic({ apiKey: 'test-api-key' });
 const model = provider.chat('claude-3-haiku-20240307');
 
 describe('doGenerate', () => {
@@ -181,6 +178,28 @@ describe('doGenerate', () => {
     });
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareJsonResponse({});
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doGenerate({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'application/json',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the model and the messages', async () => {
     prepareJsonResponse({});
 
@@ -279,6 +298,30 @@ describe('doStream', () => {
     ]);
   });
 
+  it('should expose the raw response headers', async () => {
+    prepareStreamResponse({ content: [] });
+
+    server.responseHeaders = {
+      'test-header': 'test-value',
+    };
+
+    const { rawResponse } = await model.doStream({
+      inputFormat: 'prompt',
+      mode: { type: 'regular' },
+      prompt: TEST_PROMPT,
+    });
+
+    expect(rawResponse?.headers).toStrictEqual({
+      // default headers:
+      'content-type': 'text/event-stream',
+      'cache-control': 'no-cache',
+      connection: 'keep-alive',
+
+      // custom header
+      'test-header': 'test-value',
+    });
+  });
+
   it('should pass the messages and the model', async () => {
     prepareStreamResponse({ content: [] });
```
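Both tests set `server.responseHeaders` on the shared test double before issuing the call, then assert that the default headers and the custom header come back through `rawResponse`. A rough sketch of how such a test server might merge the configurable headers into a mocked response (hypothetical `TestServer`, not the SDK's actual test helper):

```ts
// Hypothetical test double: merges per-test headers into each mocked response.
class TestServer {
  responseHeaders: Record<string, string> = {};

  jsonResponse(body: unknown): Response {
    return new Response(JSON.stringify(body), {
      headers: {
        'content-type': 'application/json', // default header
        ...this.responseHeaders, // custom headers set by the test
      },
    });
  }
}
```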

packages/anthropic/src/anthropic-messages-language-model.ts

Lines changed: 4 additions & 2 deletions
```diff
@@ -164,7 +164,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doGenerate']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/messages`,
       headers: this.config.headers(),
       body: args,
@@ -210,6 +210,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
         completionTokens: response.usage.output_tokens,
       },
       rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
       warnings,
     };
   }
@@ -219,7 +220,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
   ): Promise<Awaited<ReturnType<LanguageModelV1['doStream']>>> {
     const { args, warnings } = this.getArgs(options);
 
-    const response = await postJsonToApi({
+    const { responseHeaders, value: response } = await postJsonToApi({
       url: `${this.config.baseURL}/messages`,
       headers: this.config.headers(),
       body: {
@@ -296,6 +297,7 @@ export class AnthropicMessagesLanguageModel implements LanguageModelV1 {
         }),
       ),
       rawCall: { rawPrompt, rawSettings },
+      rawResponse: { headers: responseHeaders },
       warnings,
     };
   }
```
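Both call sites now destructure `responseHeaders` alongside the parsed body from the `postJsonToApi` result. One plausible way the provider-utils side collects those headers from a fetch `Response` (a sketch; the actual helper in `@ai-sdk/provider-utils` may differ):

```ts
// Sketch: flatten a fetch Response's headers into a plain string record,
// matching the `Record<string, string>` type used in the result objects.
function extractResponseHeaders(response: Response): Record<string, string> {
  const headers: Record<string, string> = {};
  response.headers.forEach((value, key) => {
    headers[key] = value;
  });
  return headers;
}
```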

packages/core/core/generate-object/generate-object.ts

Lines changed: 19 additions & 0 deletions
```diff
@@ -94,6 +94,7 @@ Default and recommended: 'auto' (best mode for the model).
   let finishReason: LanguageModelV1FinishReason;
   let usage: Parameters<typeof calculateTokenUsage>[0];
   let warnings: LanguageModelV1CallWarning[] | undefined;
+  let rawResponse: { headers?: Record<string, string> } | undefined;
   let logprobs: LanguageModelV1LogProbs | undefined;
 
   switch (mode) {
@@ -122,6 +123,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -152,6 +154,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -192,6 +195,7 @@ Default and recommended: 'auto' (best mode for the model).
       finishReason = generateResult.finishReason;
       usage = generateResult.usage;
       warnings = generateResult.warnings;
+      rawResponse = generateResult.rawResponse;
       logprobs = generateResult.logprobs;
 
       break;
@@ -218,6 +222,7 @@ Default and recommended: 'auto' (best mode for the model).
     finishReason,
     usage: calculateTokenUsage(usage),
     warnings,
+    rawResponse,
    logprobs,
  });
 }
@@ -246,6 +251,16 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   /**
 Logprobs for the completion.
 `undefined` if the mode does not support logprobs or if was not enabled
@@ -257,12 +272,16 @@ Logprobs for the completion.
     finishReason: LanguageModelV1FinishReason;
     usage: TokenUsage;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
     logprobs: LanguageModelV1LogProbs | undefined;
   }) {
     this.object = options.object;
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
+    this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
 }
```
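Since `rawResponse` is captured in each of the three mode branches and passed through to `GenerateObjectResult`, callers can read the headers directly off the result. A brief usage sketch (assuming the zod-based `experimental_generateObject` API of this SDK version; the model and schema are illustrative):

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateObject } from 'ai';
import { z } from 'zod';

async function main() {
  const result = await experimental_generateObject({
    model: openai('gpt-4-turbo'),
    schema: z.object({ name: z.string(), traditions: z.array(z.string()) }),
    prompt: 'Invent a new holiday.',
  });

  // Optional at both levels: providers without HTTP metadata leave it unset.
  console.log(result.rawResponse?.headers);
  console.log(result.object);
}

main().catch(console.error);
```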

packages/core/core/generate-object/stream-object.ts

Lines changed: 16 additions & 0 deletions
```diff
@@ -220,6 +220,7 @@ Default and recommended: 'auto' (best mode for the model).
   return new StreamObjectResult({
     stream: result.stream.pipeThrough(new TransformStream(transformer)),
     warnings: result.warnings,
+    rawResponse: result.rawResponse,
   });
 }
 
@@ -259,15 +260,30 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   constructor({
     stream,
     warnings,
+    rawResponse,
   }: {
     stream: ReadableStream<string | ObjectStreamPartInput>;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
   }) {
     this.originalStream = stream;
     this.warnings = warnings;
+    this.rawResponse = rawResponse;
   }
 
   get partialObjectStream(): AsyncIterableStream<DeepPartial<T>> {
```
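`StreamObjectResult` stores the headers at construction time, so they are readable before `partialObjectStream` is consumed. A short sketch under the same assumptions as the `generateObject` example above:

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_streamObject } from 'ai';
import { z } from 'zod';

async function main() {
  const result = await experimental_streamObject({
    model: openai('gpt-4-turbo'),
    schema: z.object({ name: z.string() }),
    prompt: 'Invent a new holiday.',
  });

  // Headers are available up front, before the stream is iterated.
  console.log(result.rawResponse?.headers);

  for await (const partialObject of result.partialObjectStream) {
    console.log(partialObject);
  }
}

main().catch(console.error);
```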

packages/core/core/generate-text/generate-text.ts

Lines changed: 15 additions & 0 deletions
```diff
@@ -116,6 +116,7 @@ The tools that the model can call. The model needs to support calling tools.
     finishReason: modelResponse.finishReason,
     usage: calculateTokenUsage(modelResponse.usage),
     warnings: modelResponse.warnings,
+    rawResponse: modelResponse.rawResponse,
     logprobs: modelResponse.logprobs,
   });
 }
@@ -188,6 +189,16 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   /**
 Logprobs for the completion.
 `undefined` if the mode does not support logprobs or if was not enabled
@@ -201,6 +212,9 @@ Logprobs for the completion.
     finishReason: LanguageModelV1FinishReason;
     usage: TokenUsage;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
     logprobs: LanguageModelV1LogProbs | undefined;
   }) {
     this.text = options.text;
@@ -209,6 +223,7 @@ Logprobs for the completion.
     this.finishReason = options.finishReason;
     this.usage = options.usage;
     this.warnings = options.warnings;
+    this.rawResponse = options.rawResponse;
     this.logprobs = options.logprobs;
   }
 }
```
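The non-streaming text path works the same way: `modelResponse.rawResponse` is forwarded into `GenerateTextResult`. A usage sketch mirroring the streaming example at the top of this commit (the header name assumes an OpenAI-style `x-request-id`):

```ts
import { openai } from '@ai-sdk/openai';
import { experimental_generateText } from 'ai';

async function main() {
  const result = await experimental_generateText({
    model: openai('gpt-3.5-turbo'),
    prompt: 'Invent a new holiday and describe its traditions.',
  });

  // Same optional shape as in the streaming example.
  console.log(`Request ID: ${result.rawResponse?.headers?.['x-request-id']}`);
  console.log(result.text);
}

main().catch(console.error);
```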

packages/core/core/generate-text/stream-text.test.ts

Lines changed: 1 addition & 2 deletions
```diff
@@ -4,9 +4,8 @@ import { convertArrayToReadableStream } from '../test/convert-array-to-readable-stream';
 import { convertAsyncIterableToArray } from '../test/convert-async-iterable-to-array';
 import { convertReadableStreamToArray } from '../test/convert-readable-stream-to-array';
 import { MockLanguageModelV1 } from '../test/mock-language-model-v1';
-import { experimental_streamText } from './stream-text';
-import { ServerResponse } from 'node:http';
 import { createMockServerResponse } from '../test/mock-server-response';
+import { experimental_streamText } from './stream-text';
 
 describe('result.textStream', () => {
   it('should send text deltas', async () => {
```

packages/core/core/generate-text/stream-text.ts

Lines changed: 17 additions & 1 deletion
```diff
@@ -85,7 +85,7 @@ The tools that the model can call. The model needs to support calling tools.
 }): Promise<StreamTextResult<TOOLS>> {
   const retry = retryWithExponentialBackoff({ maxRetries });
   const validatedPrompt = getValidatedPrompt({ system, prompt, messages });
-  const { stream, warnings } = await retry(() =>
+  const { stream, warnings, rawResponse } = await retry(() =>
     model.doStream({
       mode: {
         type: 'regular',
@@ -112,6 +112,7 @@ The tools that the model can call. The model needs to support calling tools.
       generatorStream: stream,
     }),
     warnings,
+    rawResponse,
   });
 }
 
@@ -152,15 +153,30 @@ Warnings from the model provider (e.g. unsupported settings)
    */
   readonly warnings: LanguageModelV1CallWarning[] | undefined;
 
+  /**
+Optional raw response data.
+   */
+  rawResponse?: {
+    /**
+Response headers.
+     */
+    headers?: Record<string, string>;
+  };
+
   constructor({
     stream,
     warnings,
+    rawResponse,
   }: {
     stream: ReadableStream<TextStreamPart<TOOLS>>;
     warnings: LanguageModelV1CallWarning[] | undefined;
+    rawResponse?: {
+      headers?: Record<string, string>;
+    };
   }) {
     this.originalStream = stream;
     this.warnings = warnings;
+    this.rawResponse = rawResponse;
   }
 
   /**
```
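As with `StreamObjectResult`, the headers are captured when the result object is constructed, before any stream consumption happens; the `x-request-id` example at the top of this commit relies on exactly that ordering.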
