// directus-task-management/tests/services/ai/openai.service.test.ts
// (viewer metadata — "520 lines, 14 KiB, TypeScript" — converted to a comment:
// as bare text it is not valid TypeScript and would fail to compile.)

import { OpenAIService, OpenAIServiceError } from '../../../src/services/ai/openai.service';
import { AIConfig } from '../../../src/config/ai.config';
import OpenAI from 'openai';
// Mock OpenAI
jest.mock('openai');
describe('OpenAIService', () => {
let service: OpenAIService;
let mockClient: jest.Mocked<OpenAI>;
let mockConfig: AIConfig;
beforeEach(() => {
// Reset mocks
jest.clearAllMocks();
// Create mock config
mockConfig = {
openai: {
apiKey: 'test-api-key',
model: 'gpt-4-turbo',
maxTokens: 4096,
temperature: 0.7,
retryAttempts: 3,
retryDelayMs: 100,
timeout: 30000,
},
langchain: {
verbose: false,
cacheEnabled: true,
cacheTTL: 3600,
maxConcurrency: 5,
memoryBufferSize: 10,
},
rateLimit: {
maxRequestsPerMinute: 60,
maxTokensPerMinute: 90000,
maxRequestsPerDay: 10000,
},
monitoring: {
trackTokenUsage: true,
logLevel: 'info',
metricsEnabled: true,
},
};
// Create service instance
service = new OpenAIService(mockConfig);
// Get mock client instance
mockClient = (OpenAI as jest.MockedClass<typeof OpenAI>).mock.instances[0] as jest.Mocked<OpenAI>;
});
describe('complete', () => {
  /** Replaces mockClient.chat with a stub whose create() resolves to `payload`. */
  const stubCompletion = (payload: unknown) => {
    const create = jest.fn().mockResolvedValue(payload);
    mockClient.chat = { completions: { create } } as any;
    return create;
  };

  it('should successfully complete a chat request', async () => {
    const apiResponse = {
      id: 'test-id',
      object: 'chat.completion',
      created: Date.now(),
      model: 'gpt-4-turbo',
      choices: [
        {
          index: 0,
          message: { role: 'assistant' as const, content: 'Test response' },
          finish_reason: 'stop',
        },
      ],
      usage: { prompt_tokens: 10, completion_tokens: 20, total_tokens: 30 },
    };
    const create = stubCompletion(apiResponse);

    const result = await service.complete([{ role: 'user', content: 'Test message' }]);

    // The service flattens the SDK payload into its own result shape.
    expect(result).toEqual({
      content: 'Test response',
      promptTokens: 10,
      completionTokens: 20,
      totalTokens: 30,
      model: 'gpt-4-turbo',
      finishReason: 'stop',
      executionTimeMs: expect.any(Number),
    });
    // Config-driven defaults are forwarded; optional tuning knobs stay undefined.
    expect(create).toHaveBeenCalledWith({
      model: 'gpt-4-turbo',
      messages: [{ role: 'user', content: 'Test message' }],
      temperature: 0.7,
      max_tokens: 4096,
      top_p: undefined,
      frequency_penalty: undefined,
      presence_penalty: undefined,
      stop: undefined,
      response_format: undefined,
    });
  });

  it('should include system prompt when provided', async () => {
    const create = stubCompletion({
      choices: [{ message: { content: 'Response' }, finish_reason: 'stop' }],
      usage: { prompt_tokens: 15, completion_tokens: 10, total_tokens: 25 },
      model: 'gpt-4-turbo',
    });

    await service.complete(
      [{ role: 'user', content: 'Test' }],
      { systemPrompt: 'You are a helpful assistant' },
    );

    // The system prompt is prepended as the first message.
    expect(create).toHaveBeenCalledWith(
      expect.objectContaining({
        messages: [
          { role: 'system', content: 'You are a helpful assistant' },
          { role: 'user', content: 'Test' },
        ],
      }),
    );
  });

  it('should handle JSON response format', async () => {
    const create = stubCompletion({
      choices: [{ message: { content: '{"key": "value"}' }, finish_reason: 'stop' }],
      usage: { prompt_tokens: 10, completion_tokens: 15, total_tokens: 25 },
      model: 'gpt-4-turbo',
    });

    await service.complete(
      [{ role: 'user', content: 'Return JSON' }],
      { responseFormat: 'json' },
    );

    // 'json' maps to the SDK's { type: 'json_object' } response_format.
    expect(create).toHaveBeenCalledWith(
      expect.objectContaining({
        response_format: { type: 'json_object' },
      }),
    );
  });
});
describe('retry logic', () => {
  /** Builds an Error carrying an HTTP-like status code. */
  const httpError = (message: string, status: number) => {
    const err = new Error(message);
    (err as any).status = status;
    return err;
  };

  /** Minimal successful completion payload with the given content and token split. */
  const okResponse = (content: string, prompt: number, completion: number) => ({
    choices: [{ message: { content }, finish_reason: 'stop' }],
    usage: {
      prompt_tokens: prompt,
      completion_tokens: completion,
      total_tokens: prompt + completion,
    },
    model: 'gpt-4-turbo',
  });

  it('should retry on rate limit error (429)', async () => {
    const create = jest.fn()
      .mockRejectedValueOnce(httpError('Rate limit exceeded', 429))
      .mockResolvedValueOnce(okResponse('Success after retry', 10, 10));
    mockClient.chat = { completions: { create } } as any;

    const result = await service.complete([{ role: 'user', content: 'Test' }]);

    expect(result.content).toBe('Success after retry');
    expect(create).toHaveBeenCalledTimes(2);
  });

  it('should retry on server error (500+)', async () => {
    const serverError = httpError('Internal server error', 500);
    const create = jest.fn()
      .mockRejectedValueOnce(serverError)
      .mockRejectedValueOnce(serverError)
      .mockResolvedValueOnce(okResponse('Success', 5, 5));
    mockClient.chat = { completions: { create } } as any;

    const result = await service.complete([{ role: 'user', content: 'Test' }]);

    expect(result.content).toBe('Success');
    expect(create).toHaveBeenCalledTimes(3);
  });

  it('should not retry on client error (400)', async () => {
    const create = jest.fn().mockRejectedValue(httpError('Bad request', 400));
    mockClient.chat = { completions: { create } } as any;

    await expect(
      service.complete([{ role: 'user', content: 'Test' }]),
    ).rejects.toThrow(OpenAIServiceError);
    // A 4xx must fail fast: exactly one attempt, no backoff.
    expect(create).toHaveBeenCalledTimes(1);
  });

  it('should respect retry limit', async () => {
    const create = jest.fn().mockRejectedValue(httpError('Server error', 500));
    mockClient.chat = { completions: { create } } as any;

    await expect(
      service.complete([{ role: 'user', content: 'Test' }]),
    ).rejects.toThrow(OpenAIServiceError);
    // Initial attempt plus retryAttempts (3) from the config = 4 calls total.
    expect(create).toHaveBeenCalledTimes(4);
  });

  it('should handle retry-after header', async () => {
    const rateLimited = httpError('Rate limited', 429);
    (rateLimited as any).headers = { 'retry-after': '2' };
    const create = jest.fn()
      .mockRejectedValueOnce(rateLimited)
      .mockResolvedValueOnce(okResponse('Success', 10, 10));
    mockClient.chat = { completions: { create } } as any;

    // NOTE(review): this test uses real timers, so it sleeps ~2s. If the
    // service backoff is setTimeout-based, jest.useFakeTimers() with
    // advanceTimersByTimeAsync would make it instant — confirm and migrate.
    const startTime = Date.now();
    await service.complete([{ role: 'user', content: 'Test' }]);
    const elapsed = Date.now() - startTime;

    // Should wait at least 2000ms due to the retry-after header.
    expect(elapsed).toBeGreaterThanOrEqual(2000);
  });
});
describe('streamComplete', () => {
  it('should handle streaming responses', async () => {
    const deltas = [
      { choices: [{ delta: { content: 'Hello' } }] },
      { choices: [{ delta: { content: ' world' } }] },
      { choices: [{ finish_reason: 'stop' }] },
    ];
    // Fake SDK stream: anything async-iterable over the chunks above will do.
    const fakeStream = {
      async *[Symbol.asyncIterator]() {
        yield* deltas;
      },
    };
    mockClient.chat = {
      completions: { create: jest.fn().mockResolvedValue(fakeStream) },
    } as any;

    const seen: string[] = [];
    const result = await service.streamComplete(
      [{ role: 'user', content: 'Test' }],
      {},
      (chunk) => {
        seen.push(chunk);
      },
    );

    // Deltas are both forwarded to the callback and accumulated in the result.
    expect(result.content).toBe('Hello world');
    expect(result.finishReason).toBe('stop');
    expect(seen).toEqual(['Hello', ' world']);
  });
});
describe('usage statistics', () => {
  /** Completion payload whose usage block sums to the given token counts. */
  const completionWithUsage = (content: string, prompt: number, completion: number) => ({
    choices: [{ message: { content }, finish_reason: 'stop' }],
    usage: {
      prompt_tokens: prompt,
      completion_tokens: completion,
      total_tokens: prompt + completion,
    },
    model: 'gpt-4-turbo',
  });

  it('should track token usage', async () => {
    mockClient.chat = {
      completions: {
        create: jest.fn()
          .mockResolvedValueOnce(completionWithUsage('Response 1', 10, 20))
          .mockResolvedValueOnce(completionWithUsage('Response 2', 15, 25)),
      },
    } as any;

    await service.complete([{ role: 'user', content: 'Test 1' }]);
    await service.complete([{ role: 'user', content: 'Test 2' }]);

    const stats = service.getUsageStats();
    expect(stats.requestCount).toBe(2);
    // 30 + 40 tokens across both calls.
    expect(stats.totalTokensUsed).toBe(70);
    // NOTE(review): 0.0014 assumes the service's internal pricing table —
    // verify against the implementation before relying on this number.
    expect(stats.estimatedCost).toBeCloseTo(0.0014, 4);
  });

  it('should reset usage statistics', async () => {
    mockClient.chat = {
      completions: {
        create: jest.fn().mockResolvedValue(completionWithUsage('Response', 10, 20)),
      },
    } as any;

    await service.complete([{ role: 'user', content: 'Test' }]);
    expect(service.getUsageStats()).toMatchObject({
      requestCount: 1,
      totalTokensUsed: 30,
    });

    service.resetUsageStats();
    expect(service.getUsageStats()).toMatchObject({
      requestCount: 0,
      totalTokensUsed: 0,
    });
  });
});
describe('testConnection', () => {
  it('should return true when connection is successful', async () => {
    const ping = {
      choices: [{ message: { content: 'OK' }, finish_reason: 'stop' }],
      usage: { prompt_tokens: 5, completion_tokens: 2, total_tokens: 7 },
      model: 'gpt-4-turbo',
    };
    mockClient.chat = {
      completions: { create: jest.fn().mockResolvedValue(ping) },
    } as any;

    await expect(service.testConnection()).resolves.toBe(true);
  });

  it('should return false when connection fails', async () => {
    mockClient.chat = {
      completions: {
        create: jest.fn().mockRejectedValue(new Error('Connection failed')),
      },
    } as any;

    // Connection failures are reported as `false`, never rethrown.
    await expect(service.testConnection()).resolves.toBe(false);
  });
});
describe('error handling', () => {
  it('should wrap errors with OpenAIServiceError', async () => {
    const originalError = new Error('API error');
    (originalError as any).status = 400;
    (originalError as any).code = 'invalid_request';
    mockClient.chat = {
      completions: {
        create: jest.fn().mockRejectedValue(originalError),
      },
    } as any;

    // Capture the rejection and assert afterwards: the previous `fail()`
    // helper is undefined under jest-circus (Jest >= 27), and assertions
    // inside a bare `catch` silently pass when nothing throws.
    let caught: unknown;
    try {
      await service.complete([{ role: 'user', content: 'Test' }]);
    } catch (error) {
      caught = error;
    }

    expect(caught).toBeInstanceOf(OpenAIServiceError);
    const wrapped = caught as OpenAIServiceError;
    expect(wrapped.message).toContain('OpenAI API error');
    expect(wrapped.code).toBe('invalid_request');
    expect(wrapped.statusCode).toBe(400);
    expect(wrapped.retryable).toBe(false);
  });

  it('should identify retryable errors correctly', async () => {
    // Explicit element type: with pure inference each literal lacks either
    // `status` or `code`, so destructuring both fails under strict TS.
    const cases: Array<{ status?: number; code?: string; expected: boolean }> = [
      { status: 429, expected: true }, // Rate limit
      { status: 500, expected: true }, // Server error
      { status: 503, expected: true }, // Service unavailable
      { code: 'ECONNRESET', expected: true }, // Network error
      { status: 400, expected: false }, // Client error
      { status: 401, expected: false }, // Unauthorized
    ];

    for (const { status, code, expected } of cases) {
      const error = new Error('Test error');
      if (status) (error as any).status = status;
      if (code) (error as any).code = code;
      mockClient.chat = {
        completions: {
          create: jest.fn().mockRejectedValue(error),
        },
      } as any;

      // Require every case to actually reject; previously a non-throwing
      // case skipped its assertion and the test passed vacuously.
      let caught: unknown;
      try {
        await service.complete([{ role: 'user', content: 'Test' }]);
      } catch (e) {
        caught = e;
      }
      expect(caught).toBeInstanceOf(OpenAIServiceError);
      expect((caught as OpenAIServiceError).retryable).toBe(expected);
    }
  });
});
});