
Commit 3e617c7

feat(core): Support stream responses
1 parent 224c1a3 commit 3e617c7

File tree

6 files changed: +622 -8 lines changed

dev-packages/node-integration-tests/suites/tracing/google-genai/scenario.mjs

Lines changed: 183 additions & 1 deletion
@@ -38,11 +38,31 @@ class MockGoogleGenAI {
           },
         };
       },
+
+      generateContentStream: async params => {
+        // Simulate processing time
+        await new Promise(resolve => setTimeout(resolve, 10));
+
+        if (params.model === 'error-model') {
+          const error = new Error('Model not found');
+          error.status = 404;
+          throw error;
+        }
+
+        if (params.model === 'blocked-model') {
+          // Return a stream with blocked content in the first chunk
+          return this._createBlockedMockStream();
+        }
+
+        // Return an async generator that yields chunks
+        return this._createMockStream();
+      },
     };
 
     this.chats = {
       create: options => {
         // Return a chat instance with sendMessage method and model info
+        const self = this;
         return {
           model: options?.model || 'unknown', // Include model from create options
           sendMessage: async () => {
@@ -71,10 +91,108 @@ class MockGoogleGenAI {
               },
             };
           },
+
+          sendMessageStream: async () => {
+            // Simulate processing time
+            await new Promise(resolve => setTimeout(resolve, 10));
+
+            // Return an async generator that yields chunks
+            return self._createMockStream();
+          },
         };
       },
     };
   }
+
+  // Helper method to create a mock stream that yields clear GenerateContentResponse chunks
+  async *_createMockStream() {
+    // First chunk: Start of response with initial text
+    yield {
+      candidates: [
+        {
+          content: {
+            parts: [{ text: 'Hello! ' }],
+            role: 'model',
+          },
+          index: 0,
+        },
+      ],
+      responseId: 'mock-response-id',
+      modelVersion: 'gemini-1.5-pro',
+    };
+
+    // Second chunk: More text content
+    yield {
+      candidates: [
+        {
+          content: {
+            parts: [{ text: 'This is a streaming ' }],
+            role: 'model',
+          },
+          index: 0,
+        },
+      ],
+    };
+
+    // Third chunk: Final text content
+    yield {
+      candidates: [
+        {
+          content: {
+            parts: [{ text: 'response from Google GenAI!' }],
+            role: 'model',
+          },
+          index: 0,
+        },
+      ],
+    };
+
+    // Final chunk: End with finish reason and usage metadata
+    yield {
+      candidates: [
+        {
+          content: {
+            parts: [{ text: '' }], // Empty text in final chunk
+            role: 'model',
+          },
+          finishReason: 'STOP',
+          index: 0,
+        },
+      ],
+      usageMetadata: {
+        promptTokenCount: 10,
+        candidatesTokenCount: 12,
+        totalTokenCount: 22,
+      },
+    };
+  }
+
+  // Helper method to create a mock stream with blocked content (promptFeedback in first chunk)
+  async *_createBlockedMockStream() {
+    // First chunk: Contains promptFeedback with blockReason (this should trigger error handling)
+    yield {
+      promptFeedback: {
+        blockReason: 'SAFETY',
+        blockReasonMessage: 'The prompt was blocked due to safety concerns',
+      },
+      responseId: 'mock-blocked-response-id',
+      modelVersion: 'gemini-1.5-pro',
+    };
+
+    // Note: In a real blocked scenario, there would typically be no more chunks
+    // But we'll add one more to test that processing stops after the error
+    yield {
+      candidates: [
+        {
+          content: {
+            parts: [{ text: 'This should not be processed' }],
+            role: 'model',
+          },
+          index: 0,
+        },
+      ],
+    };
+  }
 }
 
 async function run() {
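
The mock helpers above yield chunks shaped like the real @google/genai GenerateContentResponse. The pattern this commit instruments is wrapping such an async generator: each chunk is re-yielded unchanged so the caller's for-await loop behaves exactly as before, while text and usage data are accumulated on the side. A minimal TypeScript sketch of that technique, assuming a hypothetical observeStream wrapper and onComplete callback (illustrative only, not the SDK's actual internals):

// Illustrative sketch only: pass chunks through an async generator while
// observing them, so downstream consumption is unaffected.
interface StreamChunk {
  candidates?: Array<{ content?: { parts?: Array<{ text?: string }> } }>;
  usageMetadata?: { promptTokenCount?: number; candidatesTokenCount?: number; totalTokenCount?: number };
}

// `onComplete` is a hypothetical callback standing in for "record span
// attributes and end the span" once the consumer has drained the stream.
async function* observeStream(
  stream: AsyncIterable<StreamChunk>,
  onComplete: (summary: { text: string; usage?: StreamChunk['usageMetadata'] }) => void,
): AsyncGenerator<StreamChunk> {
  let text = '';
  let usage: StreamChunk['usageMetadata'];
  for await (const chunk of stream) {
    // Accumulate the text parts of the first candidate
    for (const part of chunk.candidates?.[0]?.content?.parts ?? []) {
      text += part.text ?? '';
    }
    // Usage metadata arrives on the final chunk in the mock above
    if (chunk.usageMetadata) {
      usage = chunk.usageMetadata;
    }
    yield chunk; // re-yield untouched
  }
  onComplete({ text, usage });
}

Run against the mock stream above, onComplete would receive the concatenated text 'Hello! This is a streaming response from Google GenAI!' together with the usageMetadata of the final chunk.
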
@@ -118,7 +236,71 @@ async function run() {
     ],
   });
 
-  // Test 3: Error handling
+  // Test 3: models.generateContentStream (streaming)
+  const streamResponse = await instrumentedClient.models.generateContentStream({
+    model: 'gemini-1.5-flash',
+    config: {
+      temperature: 0.7,
+      topP: 0.9,
+      maxOutputTokens: 100,
+    },
+    contents: [
+      {
+        role: 'user',
+        parts: [{ text: 'Tell me about streaming' }],
+      },
+    ],
+  });
+
+  // Consume the stream
+  for await (const _ of streamResponse) {
+    void _;
+  }
+
+  // Test 4: chat.sendMessageStream (streaming)
+  const streamingChat = instrumentedClient.chats.create({
+    model: 'gemini-1.5-pro',
+    config: {
+      temperature: 0.8,
+      topP: 0.9,
+      maxOutputTokens: 150,
+    },
+  });
+
+  const chatStreamResponse = await streamingChat.sendMessageStream({
+    message: 'Tell me a streaming joke',
+  });
+
+  // Consume the chat stream
+  for await (const _ of chatStreamResponse) {
+    void _;
+  }
+
+  // Test 5: Blocked content streaming (should trigger error handling)
+  try {
+    const blockedStreamResponse = await instrumentedClient.models.generateContentStream({
+      model: 'blocked-model',
+      config: {
+        temperature: 0.7,
+      },
+      contents: [
+        {
+          role: 'user',
+          parts: [{ text: 'This content will be blocked' }],
+        },
+      ],
+    });
+
+    // Consume the stream - should encounter promptFeedback error in first chunk
+    for await (const _ of blockedStreamResponse) {
+      void _;
+    }
+  } catch (error) {
+    // Expected: The stream should be processed, but the span should be marked with error status
+    // The error handling happens in the streaming instrumentation, not as a thrown error
+  }
+
+  // Test 6: Error handling
   try {
     await instrumentedClient.models.generateContent({
       model: 'error-model',

dev-packages/node-integration-tests/suites/tracing/google-genai/test.ts

Lines changed: 114 additions & 3 deletions
@@ -63,7 +63,58 @@ describe('Google GenAI integration', () => {
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
-        // Fourth span - error handling
+        // Fourth span - models.generateContentStream (streaming)
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'models',
+            'sentry.op': 'gen_ai.models',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'gemini-1.5-flash',
+            'gen_ai.request.temperature': 0.7,
+            'gen_ai.request.top_p': 0.9,
+            'gen_ai.request.max_tokens': 100,
+            'gen_ai.response.streaming': true,
+            'gen_ai.response.id': 'mock-response-id',
+            'gen_ai.response.model': 'gemini-1.5-pro',
+          }),
+          description: 'models gemini-1.5-flash stream-response',
+          op: 'gen_ai.models',
+          origin: 'auto.ai.google_genai',
+        }),
+        // Fifth span - chat.sendMessageStream (streaming)
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'chat',
+            'sentry.op': 'gen_ai.chat',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'gemini-1.5-pro',
+            'gen_ai.response.streaming': true,
+            'gen_ai.response.id': 'mock-response-id',
+            'gen_ai.response.model': 'gemini-1.5-pro',
+          }),
+          description: 'chat gemini-1.5-pro stream-response',
+          op: 'gen_ai.chat',
+          origin: 'auto.ai.google_genai',
+        }),
+        // Sixth span - blocked content stream
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'models',
+            'sentry.op': 'gen_ai.models',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'blocked-model',
+            'gen_ai.request.temperature': 0.7,
+            'gen_ai.response.streaming': true,
+          }),
+          description: 'models blocked-model stream-response',
+          op: 'gen_ai.models',
+          origin: 'auto.ai.google_genai',
+          status: 'unknown_error',
+        }),
+        // Seventh span - error handling
         expect.objectContaining({
           data: {
             'gen_ai.operation.name': 'models',
@@ -142,7 +193,57 @@ describe('Google GenAI integration', () => {
           origin: 'auto.ai.google_genai',
           status: 'ok',
         }),
-        // Fourth span - error handling with PII
+        // Fourth span - models.generateContentStream (streaming) with PII
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'models',
+            'sentry.op': 'gen_ai.models',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'gemini-1.5-flash',
+            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
+            'gen_ai.response.streaming': true,
+            'gen_ai.response.id': 'mock-response-id',
+            'gen_ai.response.model': 'gemini-1.5-pro',
+          }),
+          description: 'models gemini-1.5-flash stream-response',
+          op: 'gen_ai.models',
+          origin: 'auto.ai.google_genai',
+        }),
+        // Fifth span - chat.sendMessageStream (streaming) with PII
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'chat',
+            'sentry.op': 'gen_ai.chat',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'gemini-1.5-pro',
+            'gen_ai.request.messages': expect.any(String), // Should include message when recordInputs: true
+            'gen_ai.response.streaming': true,
+            'gen_ai.response.id': 'mock-response-id',
+            'gen_ai.response.model': 'gemini-1.5-pro',
+          }),
+          description: 'chat gemini-1.5-pro stream-response',
+          op: 'gen_ai.chat',
+          origin: 'auto.ai.google_genai',
+        }),
+        // Sixth span - blocked content stream with PII
+        expect.objectContaining({
+          data: expect.objectContaining({
+            'gen_ai.operation.name': 'models',
+            'sentry.op': 'gen_ai.models',
+            'sentry.origin': 'auto.ai.google_genai',
+            'gen_ai.system': 'google_genai',
+            'gen_ai.request.model': 'blocked-model',
+            'gen_ai.request.messages': expect.any(String), // Should include contents when recordInputs: true
+            'gen_ai.response.streaming': true,
+          }),
+          description: 'models blocked-model stream-response',
+          op: 'gen_ai.models',
+          origin: 'auto.ai.google_genai',
+          status: 'unknown_error',
+        }),
+        // Seventh span - error handling with PII
         expect.objectContaining({
           data: expect.objectContaining({
             'gen_ai.operation.name': 'models',
@@ -163,12 +264,22 @@ describe('Google GenAI integration', () => {
 const EXPECTED_TRANSACTION_WITH_OPTIONS = {
   transaction: 'main',
   spans: expect.arrayContaining([
-    // Check that custom options are respected
+    // Check that custom options are respected for non-streaming
     expect.objectContaining({
       data: expect.objectContaining({
         'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
         'gen_ai.response.text': expect.any(String), // Should include response text when recordOutputs: true
       }),
+      description: expect.not.stringContaining('stream-response'), // Non-streaming span
+    }),
+    // Check that custom options are respected for streaming
+    expect.objectContaining({
+      data: expect.objectContaining({
+        'gen_ai.response.streaming': true,
+        'gen_ai.request.messages': expect.any(String), // Should include messages when recordInputs: true
+        'gen_ai.response.text': expect.stringContaining('streaming'), // Should include response text when recordOutputs: true
+      }),
+      description: expect.stringContaining('stream-response'),
     }),
   ]),
 };
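
The unknown_error status asserted for the blocked-model spans follows from the first chunk carrying promptFeedback.blockReason instead of candidates. A hedged sketch of that check, with SpanLike as a stand-in type rather than Sentry's actual span API:

// Stand-in span type; the real instrumentation uses Sentry's own span API.
interface SpanLike {
  setStatus(status: { code: 'error' | 'ok'; message?: string }): void;
}

interface BlockableChunk {
  promptFeedback?: { blockReason?: string; blockReasonMessage?: string };
}

// Returns true when the chunk reports a blocked prompt, after marking the
// span as errored; later chunks can then be skipped for attribute extraction.
function handlePromptFeedback(chunk: BlockableChunk, span: SpanLike): boolean {
  const feedback = chunk.promptFeedback;
  if (feedback?.blockReason) {
    span.setStatus({
      code: 'error',
      message: feedback.blockReasonMessage ?? `Content blocked: ${feedback.blockReason}`,
    });
    return true;
  }
  return false;
}

Note that the scenario's try/catch around the blocked stream never catches anything: as its comment says, the error surfaces as span status set by the instrumentation, not as a thrown exception.
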

packages/core/src/utils/google-genai/constants.ts

Lines changed: 9 additions & 1 deletion
@@ -2,7 +2,15 @@ export const GOOGLE_GENAI_INTEGRATION_NAME = 'Google_GenAI';
 
 // https://ai.google.dev/api/rest/v1/models/generateContent
 // https://ai.google.dev/api/rest/v1/chats/sendMessage
-export const GOOGLE_GENAI_INSTRUMENTED_METHODS = ['models.generateContent', 'chats.create', 'sendMessage'] as const;
+// https://googleapis.github.io/js-genai/release_docs/classes/models.Models.html#generatecontentstream
+// https://googleapis.github.io/js-genai/release_docs/classes/chats.Chat.html#sendmessagestream
+export const GOOGLE_GENAI_INSTRUMENTED_METHODS = [
+  'models.generateContent',
+  'models.generateContentStream',
+  'chats.create',
+  'sendMessage',
+  'sendMessageStream',
+] as const;
 
 // Constants for internal use
 export const GOOGLE_GENAI_SYSTEM_NAME = 'google_genai';
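
One plausible use of the widened list, sketched under assumptions: shouldInstrument is a hypothetical helper, and inferring the streaming case from the method name's Stream suffix is a guess motivated by the tests' gen_ai.response.streaming attribute, not code taken from this diff.

import { GOOGLE_GENAI_INSTRUMENTED_METHODS } from './constants';

type InstrumentedMethod = (typeof GOOGLE_GENAI_INSTRUMENTED_METHODS)[number];

// Hypothetical helper: decide whether an intercepted method path should be
// wrapped, based on the list above.
function shouldInstrument(methodPath: string): methodPath is InstrumentedMethod {
  return (GOOGLE_GENAI_INSTRUMENTED_METHODS as readonly string[]).includes(methodPath);
}

// Streaming variants can be told apart by name, e.g. to decide whether to
// attach a streaming attribute to the span.
const isStreaming = (methodPath: string): boolean => methodPath.endsWith('Stream');

// shouldInstrument('models.generateContentStream') === true
// isStreaming('models.generateContentStream') === true
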
