Mirror of https://github.com/QwenLM/qwen-code.git
chore: format
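This is a formatting-only commit: long call expressions are reflowed to fit a line-length limit, with no behavior change. The wrapping and trailing commas in the hunks below are consistent with Prettier defaults. As a minimal sketch, assuming the repository formats with Prettier (its actual config file is not part of this diff):

// prettier.config.js: a sketch under the assumption above, not the repo's real config.
export default {
  printWidth: 80, // expressions longer than this column get wrapped
  singleQuote: true, // matches the quoting style seen in the hunks
  trailingComma: 'all', // matches the trailing commas after wrapped arguments
};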
@@ -146,7 +146,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow(
       /Request timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
     );
   });
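This hunk and most of the ones that follow are the same Vitest assertion before and after wrapping; only the layout changes. A self-contained sketch showing both forms behave identically (failingCall is a hypothetical stand-in for generator.generateContent):

import { describe, expect, it } from 'vitest';

// Hypothetical stand-in for generator.generateContent(request, 'test-prompt-id').
const failingCall = async (): Promise<never> => {
  throw new Error('Request timeout after 30s. Try reducing input length or increasing timeout in config.');
};

describe('wrapped vs. unwrapped assertion', () => {
  it('matches the same rejection either way', async () => {
    // One-line form (before formatting):
    await expect(failingCall()).rejects.toThrow(/Request timeout after \d+s/);
    // Wrapped form (after formatting), identical semantics:
    await expect(
      failingCall(),
    ).rejects.toThrow(/Request timeout after \d+s/);
  });
});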
@@ -161,9 +163,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
-      'Invalid API key',
-    );
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow('Invalid API key');
   });
 
   it('should include troubleshooting tips for timeout errors', async () => {
@@ -199,7 +201,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
+    await expect(
+      generator.generateContentStream(request, 'test-prompt-id'),
+    ).rejects.toThrow(
       /Streaming setup timeout after \d+s\. Try reducing input length or increasing timeout in config\./,
     );
   });
@@ -324,9 +328,9 @@ describe('OpenAIContentGenerator Timeout Handling', () => {
       };
 
       // Should not throw due to token counting failure
-      await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
-        /Request timeout after \d+s/,
-      );
+      await expect(
+        generator.generateContent(request, 'test-prompt-id'),
+      ).rejects.toThrow(/Request timeout after \d+s/);
     });
   });
 });
@@ -439,7 +439,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await generator.generateContentStream(request, 'test-prompt-id');
+    const stream = await generator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const responses = [];
     for await (const response of stream) {
       responses.push(response);
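The streaming tests in this file consume the AsyncGenerator returned by generateContentStream with for await. A minimal sketch of that consumption pattern, with a hypothetical fakeStream standing in for the real generator:

// Hypothetical stand-in for generator.generateContentStream(...).
async function* fakeStream(): AsyncGenerator<{ text: string }> {
  yield { text: 'Hello' };
  yield { text: ' world' };
}

async function collectResponses(): Promise<Array<{ text: string }>> {
  const responses: Array<{ text: string }> = [];
  // Same shape as the tests: push each chunk as it arrives.
  for await (const response of fakeStream()) {
    responses.push(response);
  }
  return responses;
}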
@@ -528,7 +531,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await generator.generateContentStream(request, 'test-prompt-id');
+    const stream = await generator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const responses = [];
     for await (const response of stream) {
       responses.push(response);
@@ -668,9 +674,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
-      'Invalid API key',
-    );
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow('Invalid API key');
   });
 
   it('should estimate tokens on error for telemetry', async () => {
@@ -872,7 +878,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const result = await generator.generateContent(request, 'test-prompt-id');
+    const result = await generator.generateContent(
+      request,
+      'test-prompt-id',
+    );
     if (
       result.candidates &&
       result.candidates.length > 0 &&
@@ -969,7 +978,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow(
       /Troubleshooting tips.*Reduce input length.*Increase timeout.*Check network/s,
     );
   });
@@ -985,9 +996,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow(
-      'Streaming setup failed',
-    );
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow('Streaming setup failed');
   });
 
   it('should handle timeout errors during streaming setup', async () => {
@@ -999,7 +1010,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContentStream(request, 'test-prompt-id')).rejects.toThrow(
+    await expect(
+      generator.generateContentStream(request, 'test-prompt-id'),
+    ).rejects.toThrow(
       /Streaming setup timeout troubleshooting.*Reduce input length/s,
     );
   });
@@ -1042,7 +1055,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
+    const stream = await loggingGenerator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
 
     // Consume the stream and expect error
     await expect(async () => {
@@ -1495,7 +1511,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(testGenerator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
+    await expect(
+      testGenerator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow();
 
     // Error logging should be suppressed
     expect(consoleSpy).not.toHaveBeenCalledWith(
@@ -1519,7 +1537,9 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    await expect(generator.generateContent(request, 'test-prompt-id')).rejects.toThrow();
+    await expect(
+      generator.generateContent(request, 'test-prompt-id'),
+    ).rejects.toThrow();
 
     // Error logging should occur by default
     expect(consoleSpy).toHaveBeenCalledWith(
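Both of these tests assert on a console spy to check whether error logging is suppressed or emitted. A minimal Vitest sketch of that pattern, assuming consoleSpy wraps console.error (the spy's setup sits outside this diff):

import { expect, it, vi } from 'vitest';

it('can assert on logging behavior', async () => {
  const consoleSpy = vi.spyOn(console, 'error').mockImplementation(() => {});
  try {
    console.error('something failed'); // stand-in for the failing call under test
    expect(consoleSpy).toHaveBeenCalledWith('something failed');
  } finally {
    consoleSpy.mockRestore(); // always restore the real console.error
  }
});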
@@ -1643,7 +1663,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await generator.generateContentStream(request, 'test-prompt-id');
+    const stream = await generator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const responses = [];
     for await (const response of stream) {
       responses.push(response);
@@ -2039,7 +2062,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await loggingGenerator.generateContentStream(request, 'test-prompt-id');
+    const stream = await loggingGenerator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const responses = [];
     for await (const response of stream) {
       responses.push(response);
@@ -2083,7 +2109,10 @@ describe('OpenAIContentGenerator', () => {
       model: 'gpt-4',
     };
 
-    const stream = await generator.generateContentStream(request, 'test-prompt-id');
+    const stream = await generator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const responses = [];
     for await (const response of stream) {
       responses.push(response);
@@ -326,7 +326,7 @@ export class OpenAIContentGenerator implements ContentGenerator {
   ): Promise<AsyncGenerator<GenerateContentResponse>> {
     const startTime = Date.now();
     const messages = this.convertToOpenAIFormat(request);
 
     try {
       // Build sampling parameters with clear priority
       const samplingParams = this.buildSamplingParameters(request);
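Note the signature in this hunk: the method returns Promise<AsyncGenerator<...>> rather than being declared as an async generator itself. That split is what lets the tests above reject on the awaited call (e.g. /Streaming setup timeout/) before a single chunk is consumed. A reduced sketch of the shape, with hypothetical names rather than the real implementation:

// Sketch only: setup failures reject the outer promise; chunks come later.
async function makeStream(
  setup: () => Promise<void>,
): Promise<AsyncGenerator<string>> {
  await setup(); // a throw here rejects before iteration begins
  async function* chunks(): AsyncGenerator<string> {
    yield 'first chunk';
    yield 'second chunk';
  }
  return chunks();
}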
@@ -154,7 +154,10 @@ describe('QwenContentGenerator', () => {
       contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
     };
 
-    const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
+    const result = await qwenContentGenerator.generateContent(
+      request,
+      'test-prompt-id',
+    );
 
     expect(result.text).toBe('Generated content');
     expect(mockQwenClient.getAccessToken).toHaveBeenCalled();
@@ -171,7 +174,10 @@ describe('QwenContentGenerator', () => {
       contents: [{ role: 'user', parts: [{ text: 'Hello stream' }] }],
     };
 
-    const stream = await qwenContentGenerator.generateContentStream(request, 'test-prompt-id');
+    const stream = await qwenContentGenerator.generateContentStream(
+      request,
+      'test-prompt-id',
+    );
     const chunks: string[] = [];
 
     for await (const chunk of stream) {
@@ -238,7 +244,10 @@ describe('QwenContentGenerator', () => {
       contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
     };
 
-    const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
+    const result = await qwenContentGenerator.generateContent(
+      request,
+      'test-prompt-id',
+    );
 
     expect(result.text).toBe('Generated content');
     expect(mockQwenClient.refreshAccessToken).toHaveBeenCalled();
@@ -545,7 +554,10 @@ describe('QwenContentGenerator', () => {
       contents: [{ role: 'user', parts: [{ text: 'Hello' }] }],
     };
 
-    const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
+    const result = await qwenContentGenerator.generateContent(
+      request,
+      'test-prompt-id',
+    );
 
     expect(result.text).toBe('Success after retry');
     expect(mockGenerateContent).toHaveBeenCalledTimes(2);
@@ -795,7 +807,10 @@ describe('QwenContentGenerator', () => {
       contents: [{ role: 'user', parts: [{ text: 'Test message' }] }],
     };
 
-    const result = await qwenContentGenerator.generateContent(request, 'test-prompt-id');
+    const result = await qwenContentGenerator.generateContent(
+      request,
+      'test-prompt-id',
+    );
 
     expect(result.text).toBe('Success after refresh');
     expect(mockQwenClient.getAccessToken).toHaveBeenCalled();
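The QwenContentGenerator hunks above exercise an auth-refresh flow: a call fails, the token is refreshed, and the call is retried once, which is why one test expects mockGenerateContent to have been called twice and others expect refreshAccessToken to have been called. A minimal sketch of such a wrapper, with entirely hypothetical names (not the real QwenContentGenerator API):

// Hypothetical refresh-and-retry wrapper.
async function withTokenRefresh<T>(
  call: (token: string) => Promise<T>,
  getToken: () => Promise<string>,
  refreshToken: () => Promise<string>,
): Promise<T> {
  try {
    return await call(await getToken());
  } catch {
    // On auth failure, refresh the credential and retry exactly once.
    return call(await refreshToken());
  }
}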