deepsite / test-dynamic-tokens.mjs
// Test script to verify dynamic max_tokens calculation
import { calculateSafeMaxTokens, estimateTokenCount } from './lib/openrouter.js';
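// Assumed helper semantics (not verified against lib/openrouter.js):
//   estimateTokenCount(text): rough token estimate for a string
//   calculateSafeMaxTokens(contextLength, inputTokens, maxCompletionTokens):
//     a completion budget that fits the remaining context window without
//     exceeding the model's max completion tokens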
console.log("Testing dynamic max_tokens calculation:\n");
// Mock model scenarios based on real OpenRouter models
const testScenarios = [
  {
    name: "DeepSeek V3 (High context)",
    contextLength: 131072,
    maxCompletionTokens: 8192,
    inputText: "System prompt and large HTML content with about 5000 tokens worth of text"
  },
  {
    name: "Claude 3.5 Sonnet",
    contextLength: 200000,
    maxCompletionTokens: 8192,
    inputText: "System prompt with moderate HTML content"
  },
  {
    name: "GPT-4 Turbo",
    contextLength: 128000,
    maxCompletionTokens: 4096,
    inputText: "System prompt with small context"
  }
];
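// For each scenario: estimate the input size, request a safe completion
// budget, and confirm that input + completion stays within the context window.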
testScenarios.forEach((scenario, index) => {
  // Pad the input with a random amount of filler text to vary the input size.
  const estimatedInputTokens = estimateTokenCount(
    scenario.inputText + "x".repeat(Math.floor(Math.random() * 10000))
  );
  const safeMaxTokens = calculateSafeMaxTokens(
    scenario.contextLength,
    estimatedInputTokens,
    scenario.maxCompletionTokens
  );
  const totalUsage = estimatedInputTokens + safeMaxTokens;
  const usagePercentage = ((totalUsage / scenario.contextLength) * 100).toFixed(1);

  console.log(`${index + 1}. ${scenario.name}:`);
  console.log(`   Context Length: ${scenario.contextLength.toLocaleString()}`);
  console.log(`   Max Completion: ${scenario.maxCompletionTokens.toLocaleString()}`);
  console.log(`   Input Tokens: ${estimatedInputTokens.toLocaleString()}`);
  console.log(`   Safe Max Out: ${safeMaxTokens.toLocaleString()}`);
  console.log(`   Total Usage: ${totalUsage.toLocaleString()} (${usagePercentage}%)`);
  console.log(`   Within Limit: ${totalUsage <= scenario.contextLength ? '✅' : '❌'}`);
  console.log('');
});
console.log("✅ Dynamic max_tokens calculation test completed!");