// directus-task-management/dist/tests/services/ai/openai.service.test.js
"use strict";
var __importDefault = (this && this.__importDefault) || function (mod) {
return (mod && mod.__esModule) ? mod : { "default": mod };
};
Object.defineProperty(exports, "__esModule", { value: true });
const openai_service_1 = require("../../../src/services/ai/openai.service");
const openai_1 = __importDefault(require("openai"));
// Mock OpenAI
jest.mock('openai');
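// Everything below runs against Jest's automock of the OpenAI SDK, so no real
// API calls are made. Note that `openai_1.default.mock.instances[0]` relies on
// the automocked constructor recording the instance created inside the service.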
describe('OpenAIService', () => {
    let service;
    let mockClient;
    let mockConfig;
    beforeEach(() => {
        // Reset mocks
        jest.clearAllMocks();
        // Create mock config
        mockConfig = {
            openai: {
                apiKey: 'test-api-key',
                model: 'gpt-4-turbo',
                maxTokens: 4096,
                temperature: 0.7,
                retryAttempts: 3,
                retryDelayMs: 100,
                timeout: 30000,
            },
            langchain: {
                verbose: false,
                cacheEnabled: true,
                cacheTTL: 3600,
                maxConcurrency: 5,
                memoryBufferSize: 10,
            },
            rateLimit: {
                maxRequestsPerMinute: 60,
                maxTokensPerMinute: 90000,
                maxRequestsPerDay: 10000,
            },
            monitoring: {
                trackTokenUsage: true,
                logLevel: 'info',
                metricsEnabled: true,
            },
        };
        // Create service instance
        service = new openai_service_1.OpenAIService(mockConfig);
        // Get mock client instance
        mockClient = openai_1.default.mock.instances[0];
    });
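    // `complete` happy path: request shaping, option handling, and mapping of
    // the raw completion payload onto the service's result shape.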
    describe('complete', () => {
        it('should successfully complete a chat request', async () => {
            // Mock successful response
            const mockResponse = {
                id: 'test-id',
                object: 'chat.completion',
                created: Date.now(),
                model: 'gpt-4-turbo',
                choices: [{
                    index: 0,
                    message: {
                        role: 'assistant',
                        content: 'Test response',
                    },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 20,
                    total_tokens: 30,
                },
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockResponse),
                },
            };
            const result = await service.complete([
                { role: 'user', content: 'Test message' },
            ]);
            expect(result).toEqual({
                content: 'Test response',
                promptTokens: 10,
                completionTokens: 20,
                totalTokens: 30,
                model: 'gpt-4-turbo',
                finishReason: 'stop',
                executionTimeMs: expect.any(Number),
            });
            expect(mockClient.chat.completions.create).toHaveBeenCalledWith({
                model: 'gpt-4-turbo',
                messages: [{ role: 'user', content: 'Test message' }],
                temperature: 0.7,
                max_tokens: 4096,
                top_p: undefined,
                frequency_penalty: undefined,
                presence_penalty: undefined,
                stop: undefined,
                response_format: undefined,
            });
        });
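        // The `systemPrompt` option should be prepended as a `system` message
        // ahead of the caller-supplied messages.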
        it('should include system prompt when provided', async () => {
            const mockResponse = {
                choices: [{
                    message: { content: 'Response' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 15,
                    completion_tokens: 10,
                    total_tokens: 25,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockResponse),
                },
            };
            await service.complete([{ role: 'user', content: 'Test' }], { systemPrompt: 'You are a helpful assistant' });
            expect(mockClient.chat.completions.create).toHaveBeenCalledWith(expect.objectContaining({
                messages: [
                    { role: 'system', content: 'You are a helpful assistant' },
                    { role: 'user', content: 'Test' },
                ],
            }));
        });
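        // `responseFormat: 'json'` should map to the SDK's
        // `response_format: { type: 'json_object' }` request field.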
        it('should handle JSON response format', async () => {
            const mockResponse = {
                choices: [{
                    message: { content: '{"key": "value"}' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 15,
                    total_tokens: 25,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockResponse),
                },
            };
            await service.complete([{ role: 'user', content: 'Return JSON' }], { responseFormat: 'json' });
            expect(mockClient.chat.completions.create).toHaveBeenCalledWith(expect.objectContaining({
                response_format: { type: 'json_object' },
            }));
        });
    });
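    // Retry behavior. These tests only pin down attempt counts and the
    // retry-after wait; the exact backoff curve between attempts (presumably
    // seeded from `retryDelayMs`) is an implementation detail of the service.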
    describe('retry logic', () => {
        it('should retry on rate limit error (429)', async () => {
            const mockError = new Error('Rate limit exceeded');
            mockError.status = 429;
            const mockResponse = {
                choices: [{
                    message: { content: 'Success after retry' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 10,
                    total_tokens: 20,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn()
                        .mockRejectedValueOnce(mockError)
                        .mockResolvedValueOnce(mockResponse),
                },
            };
            const result = await service.complete([
                { role: 'user', content: 'Test' },
            ]);
            expect(result.content).toBe('Success after retry');
            expect(mockClient.chat.completions.create).toHaveBeenCalledTimes(2);
        });
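        // 5xx responses are treated as transient: two consecutive failures
        // should be followed by a successful third attempt.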
        it('should retry on server error (500+)', async () => {
            const mockError = new Error('Internal server error');
            mockError.status = 500;
            const mockResponse = {
                choices: [{
                    message: { content: 'Success' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 5,
                    completion_tokens: 5,
                    total_tokens: 10,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn()
                        .mockRejectedValueOnce(mockError)
                        .mockRejectedValueOnce(mockError)
                        .mockResolvedValueOnce(mockResponse),
                },
            };
            const result = await service.complete([
                { role: 'user', content: 'Test' },
            ]);
            expect(result.content).toBe('Success');
            expect(mockClient.chat.completions.create).toHaveBeenCalledTimes(3);
        });
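        // 4xx client errors (other than 429) are not retryable, so exactly one
        // call should be made before the wrapped error surfaces.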
        it('should not retry on client error (400)', async () => {
            const mockError = new Error('Bad request');
            mockError.status = 400;
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockRejectedValue(mockError),
                },
            };
            await expect(service.complete([
                { role: 'user', content: 'Test' },
            ])).rejects.toThrow(openai_service_1.OpenAIServiceError);
            expect(mockClient.chat.completions.create).toHaveBeenCalledTimes(1);
        });
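        // With `retryAttempts: 3`, a persistently failing call should be tried
        // four times in total (initial attempt + three retries) before failing.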
        it('should respect retry limit', async () => {
            const mockError = new Error('Server error');
            mockError.status = 500;
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockRejectedValue(mockError),
                },
            };
            await expect(service.complete([
                { role: 'user', content: 'Test' },
            ])).rejects.toThrow(openai_service_1.OpenAIServiceError);
            // Should attempt initial + 3 retries = 4 total
            expect(mockClient.chat.completions.create).toHaveBeenCalledTimes(4);
        });
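        // This test uses real timers and therefore takes at least two seconds.
        // It assumes the service honors a `retry-after` header (in seconds) in
        // preference to its own backoff delay.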
        it('should handle retry-after header', async () => {
            const mockError = new Error('Rate limited');
            mockError.status = 429;
            mockError.headers = { 'retry-after': '2' };
            const mockResponse = {
                choices: [{
                    message: { content: 'Success' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 10,
                    total_tokens: 20,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn()
                        .mockRejectedValueOnce(mockError)
                        .mockResolvedValueOnce(mockResponse),
                },
            };
            const startTime = Date.now();
            await service.complete([{ role: 'user', content: 'Test' }]);
            const elapsed = Date.now() - startTime;
            // Should wait at least 2000ms due to retry-after header
            expect(elapsed).toBeGreaterThanOrEqual(2000);
        });
    });
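    // Streaming: content deltas are accumulated into the final result while
    // each chunk is also forwarded to the caller's chunk callback.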
    describe('streamComplete', () => {
        it('should handle streaming responses', async () => {
            const chunks = [
                { choices: [{ delta: { content: 'Hello' } }] },
                { choices: [{ delta: { content: ' world' } }] },
                { choices: [{ finish_reason: 'stop' }] },
            ];
            const mockStream = {
                [Symbol.asyncIterator]: async function* () {
                    for (const chunk of chunks) {
                        yield chunk;
                    }
                },
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockStream),
                },
            };
            const receivedChunks = [];
            const result = await service.streamComplete(
                [{ role: 'user', content: 'Test' }],
                {},
                (chunk) => receivedChunks.push(chunk));
            expect(result.content).toBe('Hello world');
            expect(result.finishReason).toBe('stop');
            expect(receivedChunks).toEqual(['Hello', ' world']);
        });
    });
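    // Usage tracking. The cost assertion depends on the service's internal
    // per-token pricing table; 70 tokens at those (unspecified) rates is
    // expected to come to roughly $0.0014.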
    describe('usage statistics', () => {
        it('should track token usage', async () => {
            const mockResponse1 = {
                choices: [{
                    message: { content: 'Response 1' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 20,
                    total_tokens: 30,
                },
                model: 'gpt-4-turbo',
            };
            const mockResponse2 = {
                choices: [{
                    message: { content: 'Response 2' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 15,
                    completion_tokens: 25,
                    total_tokens: 40,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn()
                        .mockResolvedValueOnce(mockResponse1)
                        .mockResolvedValueOnce(mockResponse2),
                },
            };
            await service.complete([{ role: 'user', content: 'Test 1' }]);
            await service.complete([{ role: 'user', content: 'Test 2' }]);
            const stats = service.getUsageStats();
            expect(stats.requestCount).toBe(2);
            expect(stats.totalTokensUsed).toBe(70);
            expect(stats.estimatedCost).toBeCloseTo(0.0014, 4);
        });
        it('should reset usage statistics', async () => {
            const mockResponse = {
                choices: [{
                    message: { content: 'Response' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 10,
                    completion_tokens: 20,
                    total_tokens: 30,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockResponse),
                },
            };
            await service.complete([{ role: 'user', content: 'Test' }]);
            let stats = service.getUsageStats();
            expect(stats.requestCount).toBe(1);
            expect(stats.totalTokensUsed).toBe(30);
            service.resetUsageStats();
            stats = service.getUsageStats();
            expect(stats.requestCount).toBe(0);
            expect(stats.totalTokensUsed).toBe(0);
        });
    });
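    // `testConnection` is expected to issue a minimal completion and map
    // success or failure onto a boolean rather than throwing.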
    describe('testConnection', () => {
        it('should return true when connection is successful', async () => {
            const mockResponse = {
                choices: [{
                    message: { content: 'OK' },
                    finish_reason: 'stop',
                }],
                usage: {
                    prompt_tokens: 5,
                    completion_tokens: 2,
                    total_tokens: 7,
                },
                model: 'gpt-4-turbo',
            };
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockResolvedValue(mockResponse),
                },
            };
            const result = await service.testConnection();
            expect(result).toBe(true);
        });
        it('should return false when connection fails', async () => {
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockRejectedValue(new Error('Connection failed')),
                },
            };
            const result = await service.testConnection();
            expect(result).toBe(false);
        });
    });
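    // Error handling: failures should surface as OpenAIServiceError carrying
    // the upstream status, error code, and a `retryable` classification.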
    describe('error handling', () => {
        it('should wrap errors with OpenAIServiceError', async () => {
            const originalError = new Error('API error');
            originalError.status = 400;
            originalError.code = 'invalid_request';
            mockClient.chat = {
                completions: {
                    create: jest.fn().mockRejectedValue(originalError),
                },
            };
            // fail() is not defined under Jest's default jest-circus runner;
            // expect.assertions(5) guarantees the catch block actually ran.
            expect.assertions(5);
            try {
                await service.complete([{ role: 'user', content: 'Test' }]);
            }
            catch (error) {
                expect(error).toBeInstanceOf(openai_service_1.OpenAIServiceError);
                expect(error.message).toContain('OpenAI API error');
                expect(error.code).toBe('invalid_request');
                expect(error.statusCode).toBe(400);
                expect(error.retryable).toBe(false);
            }
        });
        it('should identify retryable errors correctly', async () => {
            const retryableErrors = [
                { status: 429, expected: true }, // Rate limit
                { status: 500, expected: true }, // Server error
                { status: 503, expected: true }, // Service unavailable
                { code: 'ECONNRESET', expected: true }, // Network error
                { status: 400, expected: false }, // Client error
                { status: 401, expected: false }, // Unauthorized
            ];
            for (const { status, code, expected } of retryableErrors) {
                const error = new Error('Test error');
                if (status) error.status = status;
                if (code) error.code = code;
                mockClient.chat = {
                    completions: {
                        create: jest.fn().mockRejectedValue(error),
                    },
                };
                try {
                    await service.complete([{ role: 'user', content: 'Test' }]);
                }
                catch (caught) {
                    expect(caught.retryable).toBe(expected);
                }
            }
        });
    });
});
//# sourceMappingURL=openai.service.test.js.map