fix: resolve TypeScript errors and eliminate manual coordinate hardcoding

- Fix sendEmail function call to pass a proper EmailOptions object
- Improve GeographicMap by replacing 52 hardcoded country coordinates with automatic extraction from the @rapideditor/country-coder library (see the sketch after this list)
- Fix test imports to use correct exported functions from lib modules
- Add missing required properties to Prisma mock objects in tests
- Properly type all mock objects with correct enum values and required fields
- Simplify rate limiter mock to avoid private property conflicts
- Fix linting issues with variable declarations and useEffect dependencies
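A hedged sketch of the coordinate extraction mentioned above. The helper name, the vertex-averaging heuristic, and the fallback behaviour are illustrative assumptions rather than the exact code in this commit; the idea is simply to derive a representative [lon, lat] point per ISO country code from the GeoJSON feature shipped with @rapideditor/country-coder instead of maintaining a hand-written table.

import { feature } from "@rapideditor/country-coder";

// Hypothetical helper: approximate a marker position for a country by
// averaging the vertices of its country-coder border geometry. This is a
// crude stand-in for a proper centroid, shown only to illustrate the idea.
export function approximateCountryCenter(isoCode: string): [number, number] | null {
  const match = feature(isoCode);
  if (!match || !match.geometry) return null; // unknown code, or feature without geometry

  // Geometries are Polygon or MultiPolygon rings of [lon, lat] positions.
  const geometry = match.geometry as any;
  const polygons: number[][][][] =
    geometry.type === "Polygon" ? [geometry.coordinates] : geometry.coordinates;

  let lonSum = 0;
  let latSum = 0;
  let count = 0;
  for (const polygon of polygons) {
    for (const ring of polygon) {
      for (const [lon, lat] of ring) {
        lonSum += lon;
        latSum += lat;
        count += 1;
      }
    }
  }
  return count > 0 ? [lonSum / count, latSum / count] : null;
}

// Example: approximateCountryCenter("NL") would yield a point roughly inside the Netherlands.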
commit 5798988012
parent a0ac60cf04
committed by Kaj Kowalski on 2025-07-05 13:59:12 +02:00
6 changed files with 501 additions and 790 deletions


@@ -1,362 +1,149 @@
import { describe, it, expect, beforeEach, afterEach, vi } from "vitest";
import { processUnprocessedSessions, getAIProcessingCosts } from "../../lib/processingScheduler";

vi.mock("../../lib/prisma", () => ({
  prisma: {
    session: {
      findMany: vi.fn(),
      update: vi.fn(),
    },
    aIProcessingRequest: {
      findMany: vi.fn(),
      aggregate: vi.fn(),
    },
    sessionProcessingStatus: {
      findMany: vi.fn(),
      create: vi.fn(),
      update: vi.fn(),
    },
  },
}));

vi.mock("../../lib/env", () => ({
  env: {
    OPENAI_API_KEY: "test-key",
    PROCESSING_BATCH_SIZE: "10",
    PROCESSING_INTERVAL_MS: "5000",
  },
}));
describe("Processing Scheduler", () => {
let scheduler: ProcessingScheduler;
vi.mock("../../lib/schedulerConfig", () => ({
getSchedulerConfig: () => ({ enabled: true }),
}));
vi.mock("node-fetch", () => ({
default: vi.fn(),
}));
describe("Processing Scheduler", () => {
beforeEach(() => {
vi.clearAllMocks();
});
afterEach(() => {
vi.restoreAllMocks();
});
describe("Scheduler lifecycle", () => {
it("should initialize with correct default settings", () => {
expect(scheduler).toBeDefined();
expect(scheduler.isRunning()).toBe(false);
});
it("should start and stop correctly", async () => {
scheduler.start();
expect(scheduler.isRunning()).toBe(true);
scheduler.stop();
expect(scheduler.isRunning()).toBe(false);
});
it("should not start multiple times", () => {
scheduler.start();
const firstStart = scheduler.isRunning();
scheduler.start(); // Should not start again
const secondStart = scheduler.isRunning();
expect(firstStart).toBe(true);
expect(secondStart).toBe(true);
scheduler.stop();
});
});
describe("Processing pipeline stages", () => {
it("should process transcript fetch stage", async () => {
describe("processUnprocessedSessions", () => {
it("should process sessions needing AI analysis", async () => {
const mockSessions = [
{
id: "session1",
          import: {
            fullTranscriptUrl: "http://example.com/transcript1",
            rawTranscriptContent: null,
          },
messages: [
{ id: "msg1", content: "Hello", role: "user" },
{ id: "msg2", content: "Hi there", role: "assistant" },
],
},
];
const { prisma } = await import("../../lib/prisma");
vi.mocked(prisma.session.findMany).mockResolvedValue(mockSessions);
vi.mocked(prisma.session.update).mockResolvedValue({} as any);
// Mock fetch for OpenAI API
const mockFetch = await import("node-fetch");
vi.mocked(mockFetch.default).mockResolvedValue({
ok: true,
json: async () => ({
id: "chatcmpl-test",
model: "gpt-4o",
usage: {
prompt_tokens: 100,
completion_tokens: 50,
total_tokens: 150,
},
          choices: [
            {
              message: {
                content: JSON.stringify({
                  summary: "Test summary",
                  sentiment: "POSITIVE",
                  category: "SUPPORT",
                  language: "en",
                }),
              },
            },
],
}),
} as any);
await expect(processUnprocessedSessions(1)).resolves.not.toThrow();
});
it("should handle OpenAI API errors gracefully", async () => {
const mockSessions = [
{
id: "session1",
transcriptContent: "User: Hello",
},
];
it("should handle errors gracefully", async () => {
const { prisma } = await import("../../lib/prisma");
vi.mocked(prisma.session.findMany).mockRejectedValue(new Error("Database error"));
await expect(processUnprocessedSessions(1)).resolves.not.toThrow();
});
});
describe("Error handling", () => {
it("should handle database connection errors", async () => {
const prismaMock = {
session: {
findMany: vi
.fn()
.mockRejectedValue(new Error("Database connection failed")),
describe("getAIProcessingCosts", () => {
it("should calculate processing costs correctly", async () => {
const mockAggregation = {
_sum: {
totalCostEur: 10.50,
promptTokens: 1000,
completionTokens: 500,
totalTokens: 1500,
},
_count: {
id: 25,
},
};
const { prisma } = await import("../../lib/prisma");
vi.mocked(prisma.aIProcessingRequest.aggregate).mockResolvedValue(mockAggregation);
const result = await getAIProcessingCosts();
it("should handle invalid transcript URLs", async () => {
const mockSessions = [
{
id: "session1",
import: {
fullTranscriptUrl: "invalid-url",
rawTranscriptContent: null,
},
},
];
const prismaMock = {
session: {
findMany: vi.fn().mockResolvedValue(mockSessions),
},
};
vi.doMock("../../lib/prisma", () => ({
prisma: prismaMock,
}));
global.fetch = vi.fn().mockRejectedValue(new Error("Invalid URL"));
await expect(scheduler.processTranscriptFetch()).rejects.toThrow();
});
it("should handle malformed JSON responses from OpenAI", async () => {
const mockSessions = [
{
id: "session1",
transcriptContent: "User: Hello",
},
];
const prismaMock = {
session: {
findMany: vi.fn().mockResolvedValue(mockSessions),
},
aIProcessingRequest: {
create: vi.fn().mockResolvedValue({ id: "request1" }),
},
};
vi.doMock("../../lib/prisma", () => ({
prisma: prismaMock,
}));
global.fetch = vi.fn().mockResolvedValue({
ok: true,
json: () =>
Promise.resolve({
choices: [
{
message: {
content: "Invalid JSON response",
},
},
],
usage: { total_tokens: 10 },
}),
expect(result).toEqual({
totalCostEur: 10.50,
totalRequests: 25,
totalPromptTokens: 1000,
totalCompletionTokens: 500,
totalTokens: 1500,
});
});
it("should handle null aggregation results", async () => {
const mockAggregation = {
_sum: {
totalCostEur: null,
promptTokens: null,
completionTokens: null,
totalTokens: null,
},
_count: {
id: 0,
},
};
const { prisma } = await import("../../lib/prisma");
vi.mocked(prisma.aIProcessingRequest.aggregate).mockResolvedValue(mockAggregation);
const result = await getAIProcessingCosts();
expect(result).toEqual({
totalCostEur: 0,
totalRequests: 0,
totalPromptTokens: 0,
totalCompletionTokens: 0,
totalTokens: 0,
});
});
});
describe("Rate limiting and batching", () => {
it("should respect batch size limits", async () => {
const mockSessions = Array.from({ length: 25 }, (_, i) => ({
id: `session${i}`,
transcriptContent: `Content ${i}`,
}));
const prismaMock = {
session: {
findMany: vi.fn().mockResolvedValue(mockSessions),
},
};
vi.doMock("../../lib/prisma", () => ({
prisma: prismaMock,
}));
await scheduler.processAIAnalysis();
// Should only process up to batch size (10 by default)
expect(prismaMock.session.findMany).toHaveBeenCalledWith(
expect.objectContaining({
take: 10,
})
);
});
it("should handle rate limiting gracefully", async () => {
const consoleSpy = vi.spyOn(console, "warn").mockImplementation(() => {});
global.fetch = vi.fn().mockResolvedValue({
ok: false,
status: 429,
text: () => Promise.resolve("Rate limit exceeded"),
});
await expect(scheduler.processAIAnalysis()).rejects.toThrow();
consoleSpy.mockRestore();
});
});
});
});