feat: Add Qwen3-VL-Plus token limits (256K input, 32K output) (#720)

- Added 256K input context window limit for Qwen3-VL-Plus model
- Updated output token limit from 8K to 32K for Qwen3-VL-Plus
- Added comprehensive tests for both input and output limits

As requested by Qwen maintainers for proper model support.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-authored-by: Claude <noreply@anthropic.com>
Author: Brando Magnani
Date: 2025-09-26 02:19:54 -07:00
Committed by: GitHub
Parent: c405434c41
Commit: f7841338c4

2 changed files with 8 additions and 2 deletions
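
In practical terms, callers of tokenLimit should now see the following for this model (values and call signature taken from the commit message and the tests below):

tokenLimit('qwen3-vl-plus', 'input');  // 262144 (256K input)
tokenLimit('qwen3-vl-plus', 'output'); // 32768  (32K output)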

@@ -278,6 +278,11 @@ describe('tokenLimit with output type', () => {
     expect(tokenLimit('qwen-vl-max-latest', 'output')).toBe(8192); // 8K output
   });
+  it('should return different limits for input vs output for qwen3-vl-plus', () => {
+    expect(tokenLimit('qwen3-vl-plus', 'input')).toBe(262144); // 256K input
+    expect(tokenLimit('qwen3-vl-plus', 'output')).toBe(32768); // 32K output
+  });
   it('should return same default limits for unknown models', () => {
     expect(tokenLimit('unknown-model', 'input')).toBe(DEFAULT_TOKEN_LIMIT); // 128K input
     expect(tokenLimit('unknown-model', 'output')).toBe(

@@ -135,6 +135,7 @@ const PATTERNS: Array<[RegExp, TokenCount]> = [
   [/^qwen-turbo.*$/, LIMITS['128k']],
   // Qwen Vision Models
+  [/^qwen3-vl-plus$/, LIMITS['256k']], // Qwen3-VL-Plus: 256K input
   [/^qwen-vl-max.*$/, LIMITS['128k']],
   // Generic vision-model: same as qwen-vl-max (128K token context)
@@ -187,8 +188,8 @@ const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
   // Generic vision-model: same as qwen-vl-max-latest (8K max output tokens)
   [/^vision-model$/, LIMITS['8k']],
-  // Qwen3-VL-Plus: 8,192 max output tokens
-  [/^qwen3-vl-plus$/, LIMITS['8k']],
+  // Qwen3-VL-Plus: 32K max output tokens
+  [/^qwen3-vl-plus$/, LIMITS['32k']],
 ];
 /**
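
For context, here is a minimal, self-contained sketch of how pattern tables like PATTERNS and OUTPUT_PATTERNS are typically consulted. Only the names (tokenLimit, LIMITS, TokenCount, DEFAULT_TOKEN_LIMIT, PATTERNS, OUTPUT_PATTERNS) and the entries visible in this diff come from the repository; the LIMITS values, the first-match loop, and the fallback behavior are assumptions for illustration, not the actual implementation.

// Sketch only: a first-match lookup over regex pattern tables.
type TokenCount = number;

const LIMITS: Record<string, TokenCount> = {
  '8k': 8_192,
  '32k': 32_768,
  '128k': 131_072, // assumed value behind DEFAULT_TOKEN_LIMIT ("128K")
  '256k': 262_144,
};

const DEFAULT_TOKEN_LIMIT: TokenCount = LIMITS['128k'];

// Input-side patterns (excerpt mirroring the diff above).
const PATTERNS: Array<[RegExp, TokenCount]> = [
  [/^qwen-turbo.*$/, LIMITS['128k']],
  [/^qwen3-vl-plus$/, LIMITS['256k']], // Qwen3-VL-Plus: 256K input
  [/^qwen-vl-max.*$/, LIMITS['128k']],
];

// Output-side patterns (excerpt mirroring the diff above).
const OUTPUT_PATTERNS: Array<[RegExp, TokenCount]> = [
  [/^vision-model$/, LIMITS['8k']],
  [/^qwen3-vl-plus$/, LIMITS['32k']], // Qwen3-VL-Plus: 32K output
];

function tokenLimit(model: string, type: 'input' | 'output' = 'input'): TokenCount {
  const table = type === 'output' ? OUTPUT_PATTERNS : PATTERNS;
  for (const [pattern, limit] of table) {
    if (pattern.test(model)) {
      return limit; // first matching pattern wins (assumed strategy)
    }
  }
  // Fallbacks are assumptions; the real defaults live in the source file.
  return type === 'output' ? LIMITS['8k'] : DEFAULT_TOKEN_LIMIT;
}

// Expected after this change:
//   tokenLimit('qwen3-vl-plus', 'input')  -> 262144
//   tokenLimit('qwen3-vl-plus', 'output') -> 32768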