feat: complete tRPC integration and fix platform UI issues

- Implement comprehensive tRPC setup with type-safe API
- Create tRPC routers for dashboard, admin, and auth endpoints
- Migrate frontend components to use tRPC client
- Fix platform dashboard Settings button functionality
- Add platform settings page with profile and security management
- Create OpenAI API mocking infrastructure for cost-safe testing
- Update tests to work with new tRPC architecture
- Sync database schema to fix AIBatchRequest table errors
commit fa7e815a3b
parent f2a3d87636
committed by Kaj Kowalski on 2025-07-11 15:37:53 +02:00
38 changed files with 4285 additions and 518 deletions

lib/batchProcessor.ts

@@ -10,8 +10,14 @@
* - Improved error handling and retry mechanisms
*/
import {
AIBatchRequestStatus,
type AIProcessingRequest,
AIRequestStatus,
} from "@prisma/client";
import { env } from "./env";
import { openAIMock } from "./mocks/openai-mock-server";
import { prisma } from "./prisma";
import { AIBatchRequestStatus, AIRequestStatus, type AIProcessingRequest } from "@prisma/client";
/**
* Configuration for batch processing
@@ -61,7 +67,15 @@ interface OpenAIBatchResponse {
};
input_file_id: string;
completion_window: string;
status: "validating" | "failed" | "in_progress" | "finalizing" | "completed" | "expired" | "cancelling" | "cancelled";
status:
| "validating"
| "failed"
| "in_progress"
| "finalizing"
| "completed"
| "expired"
| "cancelling"
| "cancelled";
output_file_id?: string;
error_file_id?: string;
created_at: number;
@@ -109,18 +123,20 @@ export async function getPendingBatchRequests(
orderBy: {
requestedAt: "asc",
},
}) as Promise<(AIProcessingRequest & {
session: {
id: string;
companyId: string;
messages: Array<{
}) as Promise<
(AIProcessingRequest & {
session: {
id: string;
role: string;
content: string;
order: number;
}>;
} | null;
})[]>;
companyId: string;
messages: Array<{
id: string;
role: string;
content: string;
order: number;
}>;
} | null;
})[]
>;
}
/**
@@ -135,7 +151,9 @@ export async function createBatchRequest(
}
if (requests.length > BATCH_CONFIG.MAX_REQUESTS_PER_BATCH) {
throw new Error(`Batch size ${requests.length} exceeds maximum of ${BATCH_CONFIG.MAX_REQUESTS_PER_BATCH}`);
throw new Error(
`Batch size ${requests.length} exceeds maximum of ${BATCH_CONFIG.MAX_REQUESTS_PER_BATCH}`
);
}
// Create batch requests in OpenAI format
@@ -152,7 +170,9 @@
},
{
role: "user",
content: formatMessagesForProcessing((request as any).session?.messages || []),
content: formatMessagesForProcessing(
(request as any).session?.messages || []
),
},
],
temperature: 0.1,
@@ -230,7 +250,9 @@ export async function checkBatchStatuses(companyId: string): Promise<void> {
/**
* Process completed batches and extract results
*/
export async function processCompletedBatches(companyId: string): Promise<void> {
export async function processCompletedBatches(
companyId: string
): Promise<void> {
const completedBatches = await prisma.aIBatchRequest.findMany({
where: {
companyId,
@@ -262,17 +284,31 @@
}
/**
* Helper function to upload file content to OpenAI
* Helper function to upload file content to OpenAI (real or mock)
*/
async function uploadFileToOpenAI(content: string): Promise<{ id: string }> {
if (env.OPENAI_MOCK_MODE) {
console.log(
`[OpenAI Mock] Uploading batch file with ${content.split("\n").length} requests`
);
return openAIMock.mockUploadFile({
file: content,
purpose: "batch",
});
}
const formData = new FormData();
formData.append("file", new Blob([content], { type: "application/jsonl" }), "batch_requests.jsonl");
formData.append(
"file",
new Blob([content], { type: "application/jsonl" }),
"batch_requests.jsonl"
);
formData.append("purpose", "batch");
const response = await fetch("https://api.openai.com/v1/files", {
method: "POST",
headers: {
"Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
},
body: formData,
});
@@ -285,13 +321,24 @@ async function uploadFileToOpenAI(content: string): Promise<{ id: string }> {
}
/**
* Helper function to create a batch request on OpenAI
* Helper function to create a batch request on OpenAI (real or mock)
*/
async function createOpenAIBatch(inputFileId: string): Promise<OpenAIBatchResponse> {
async function createOpenAIBatch(
inputFileId: string
): Promise<OpenAIBatchResponse> {
if (env.OPENAI_MOCK_MODE) {
console.log(`[OpenAI Mock] Creating batch with input file ${inputFileId}`);
return openAIMock.mockCreateBatch({
input_file_id: inputFileId,
endpoint: "/v1/chat/completions",
completion_window: "24h",
});
}
const response = await fetch("https://api.openai.com/v1/batches", {
method: "POST",
headers: {
"Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
"Content-Type": "application/json",
},
body: JSON.stringify({
@@ -309,13 +356,20 @@ async function createOpenAIBatch(inputFileId: string): Promise<OpenAIBatchResponse> {
}
/**
* Helper function to get batch status from OpenAI
* Helper function to get batch status from OpenAI (real or mock)
*/
async function getOpenAIBatchStatus(batchId: string): Promise<OpenAIBatchResponse> {
async function getOpenAIBatchStatus(
batchId: string
): Promise<OpenAIBatchResponse> {
if (env.OPENAI_MOCK_MODE) {
console.log(`[OpenAI Mock] Getting batch status for ${batchId}`);
return openAIMock.mockGetBatch(batchId);
}
const response = await fetch(`https://api.openai.com/v1/batches/${batchId}`, {
method: "GET",
headers: {
"Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
},
});
@@ -329,7 +383,10 @@ async function getOpenAIBatchStatus(batchId: string): Promise<OpenAIBatchResponse> {
/**
* Update batch status in our database based on OpenAI response
*/
async function updateBatchStatus(batchId: string, openAIResponse: OpenAIBatchResponse): Promise<void> {
async function updateBatchStatus(
batchId: string,
openAIResponse: OpenAIBatchResponse
): Promise<void> {
const statusMapping: Record<string, AIBatchRequestStatus> = {
validating: AIBatchRequestStatus.VALIDATING,
failed: AIBatchRequestStatus.FAILED,
@@ -340,7 +397,8 @@ async function updateBatchStatus(batchId: string, openAIResponse: OpenAIBatchResponse): Promise<void> {
cancelled: AIBatchRequestStatus.CANCELLED,
};
const ourStatus = statusMapping[openAIResponse.status] || AIBatchRequestStatus.FAILED;
const ourStatus =
statusMapping[openAIResponse.status] || AIBatchRequestStatus.FAILED;
await prisma.aIBatchRequest.update({
where: { id: batchId },
@@ -348,7 +406,9 @@ async function updateBatchStatus(batchId: string, openAIResponse: OpenAIBatchResponse): Promise<void> {
status: ourStatus,
outputFileId: openAIResponse.output_file_id,
errorFileId: openAIResponse.error_file_id,
completedAt: openAIResponse.completed_at ? new Date(openAIResponse.completed_at * 1000) : null,
completedAt: openAIResponse.completed_at
? new Date(openAIResponse.completed_at * 1000)
: null,
},
});
}
@@ -369,7 +429,7 @@ async function processBatchResults(batch: {
const results = await downloadOpenAIFile(batch.outputFileId);
// Parse JSONL results
const resultLines = results.split("\n").filter(line => line.trim());
const resultLines = results.split("\n").filter((line) => line.trim());
for (const line of resultLines) {
try {
@@ -378,10 +438,16 @@
if (result.response?.body?.choices?.[0]?.message?.content) {
// Process successful result
await updateProcessingRequestWithResult(requestId, result.response.body);
await updateProcessingRequestWithResult(
requestId,
result.response.body
);
} else {
// Handle error result
await markProcessingRequestAsFailed(requestId, result.error?.message || "Unknown error");
await markProcessingRequestAsFailed(
requestId,
result.error?.message || "Unknown error"
);
}
} catch (error) {
console.error("Failed to process batch result line:", error);
@@ -399,15 +465,23 @@ async function processBatchResults(batch: {
}
/**
* Download file content from OpenAI
* Download file content from OpenAI (real or mock)
*/
async function downloadOpenAIFile(fileId: string): Promise<string> {
const response = await fetch(`https://api.openai.com/v1/files/${fileId}/content`, {
method: "GET",
headers: {
"Authorization": `Bearer ${process.env.OPENAI_API_KEY}`,
},
});
if (env.OPENAI_MOCK_MODE) {
console.log(`[OpenAI Mock] Downloading file content for ${fileId}`);
return openAIMock.mockGetFileContent(fileId);
}
const response = await fetch(
`https://api.openai.com/v1/files/${fileId}/content`,
{
method: "GET",
headers: {
Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
},
}
);
if (!response.ok) {
throw new Error(`Failed to download file: ${response.statusText}`);
@@ -419,18 +493,21 @@ async function downloadOpenAIFile(fileId: string): Promise<string> {
/**
* Update processing request with successful AI result
*/
async function updateProcessingRequestWithResult(requestId: string, aiResponse: {
usage: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
choices: Array<{
message: {
content: string;
async function updateProcessingRequestWithResult(
requestId: string,
aiResponse: {
usage: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}>;
}): Promise<void> {
choices: Array<{
message: {
content: string;
};
}>;
}
): Promise<void> {
const usage = aiResponse.usage;
const content = aiResponse.choices[0].message.content;
@@ -469,14 +546,20 @@ async function updateProcessingRequestWithResult(requestId: string, aiResponse: {
}
} catch (error) {
console.error(`Failed to parse AI result for request ${requestId}:`, error);
await markProcessingRequestAsFailed(requestId, "Failed to parse AI response");
await markProcessingRequestAsFailed(
requestId,
"Failed to parse AI response"
);
}
}
/**
* Mark processing request as failed
*/
async function markProcessingRequestAsFailed(requestId: string, errorMessage: string): Promise<void> {
async function markProcessingRequestAsFailed(
requestId: string,
errorMessage: string
): Promise<void> {
await prisma.aIProcessingRequest.update({
where: { id: requestId },
data: {
@@ -493,9 +576,12 @@
*/
function getSystemPromptForProcessingType(processingType: string): string {
const prompts = {
sentiment_analysis: "Analyze the sentiment of this conversation and respond with JSON containing: {\"sentiment\": \"POSITIVE|NEUTRAL|NEGATIVE\"}",
categorization: "Categorize this conversation and respond with JSON containing: {\"category\": \"CATEGORY_NAME\"}",
summary: "Summarize this conversation and respond with JSON containing: {\"summary\": \"Brief summary\"}",
sentiment_analysis:
'Analyze the sentiment of this conversation and respond with JSON containing: {"sentiment": "POSITIVE|NEUTRAL|NEGATIVE"}',
categorization:
'Categorize this conversation and respond with JSON containing: {"category": "CATEGORY_NAME"}',
summary:
'Summarize this conversation and respond with JSON containing: {"summary": "Brief summary"}',
full_analysis: `Analyze this conversation for sentiment, category, and provide a summary. Respond with JSON:
{
"sentiment": "POSITIVE|NEUTRAL|NEGATIVE",
@@ -505,19 +591,21 @@ function getSystemPromptForProcessingType(processingType: string): string {
}`,
};
return prompts[processingType as keyof typeof prompts] || prompts.full_analysis;
return (
prompts[processingType as keyof typeof prompts] || prompts.full_analysis
);
}
/**
* Format session messages for AI processing
*/
function formatMessagesForProcessing(messages: Array<{
role: string;
content: string;
}>): string {
return messages
.map((msg) => `${msg.role}: ${msg.content}`)
.join("\n");
function formatMessagesForProcessing(
messages: Array<{
role: string;
content: string;
}>
): string {
return messages.map((msg) => `${msg.role}: ${msg.content}`).join("\n");
}
/**
@@ -538,10 +626,13 @@ export async function getBatchProcessingStats(companyId: string) {
});
return {
batchStats: stats.reduce((acc, stat) => {
acc[stat.status] = stat._count;
return acc;
}, {} as Record<string, number>),
batchStats: stats.reduce(
(acc, stat) => {
acc[stat.status] = stat._count;
return acc;
},
{} as Record<string, number>
),
pendingRequests,
};
}
}
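For reference, a hedged sketch of the JSONL line shape this file appears to assemble for the OpenAI Batch API; the custom_id scheme, model, and message text below are illustrative assumptions, not the file's exact values:

// One line per AIProcessingRequest; lines are joined with "\n" and the
// resulting JSONL document is passed to uploadFileToOpenAI.
const exampleLine = JSON.stringify({
  custom_id: "req-123", // assumed to map back to a processing request id
  method: "POST",
  url: "/v1/chat/completions",
  body: {
    // model is not shown in this diff; gpt-4o-mini is an assumption
    model: "gpt-4o-mini",
    messages: [
      { role: "system", content: "Analyze this conversation..." },
      { role: "user", content: "user: Where can I find my schedule?" },
    ],
    temperature: 0.1, // matches the temperature shown in the diff above
  },
});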

(batch scheduler file)

@@ -9,11 +9,11 @@
import cron, { type ScheduledTask } from "node-cron";
import {
getPendingBatchRequests,
createBatchRequest,
checkBatchStatuses,
createBatchRequest,
getBatchProcessingStats,
getPendingBatchRequests,
processCompletedBatches,
getBatchProcessingStats
} from "./batchProcessor";
import { prisma } from "./prisma";
import { getSchedulerConfig } from "./schedulerConfig";
@@ -157,17 +157,24 @@ async function createBatchesForCompany(companyId: string): Promise<void> {
}
// Check if we should create a batch
const shouldCreateBatch = await shouldCreateBatchForCompany(companyId, pendingRequests.length);
const shouldCreateBatch = await shouldCreateBatchForCompany(
companyId,
pendingRequests.length
);
if (!shouldCreateBatch) {
return; // Wait for more requests or more time
}
console.log(`Creating batch for company ${companyId} with ${pendingRequests.length} requests`);
console.log(
`Creating batch for company ${companyId} with ${pendingRequests.length} requests`
);
const batchId = await createBatchRequest(companyId, pendingRequests);
console.log(`Successfully created batch ${batchId} for company ${companyId}`);
console.log(
`Successfully created batch ${batchId} for company ${companyId}`
);
} catch (error) {
console.error(`Failed to create batch for company ${companyId}:`, error);
}
@@ -176,7 +183,10 @@ async function createBatchesForCompany(companyId: string): Promise<void> {
/**
* Determine if a batch should be created for a company
*/
async function shouldCreateBatchForCompany(companyId: string, pendingCount: number): Promise<boolean> {
async function shouldCreateBatchForCompany(
companyId: string,
pendingCount: number
): Promise<boolean> {
// Always create if we have enough requests
if (pendingCount >= SCHEDULER_CONFIG.MIN_BATCH_SIZE) {
return true;
@@ -281,4 +291,4 @@ export function getBatchSchedulerStatus() {
processResultsRunning: !!processResultsTask,
config: SCHEDULER_CONFIG,
};
}
}
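A minimal sketch of the node-cron pattern this scheduler follows; the schedule expression and task names here are assumptions for illustration, not the file's actual values:

import cron, { type ScheduledTask } from "node-cron";

let exampleTask: ScheduledTask | null = null;

export function startExampleTask(): void {
  // Runs every 5 minutes; a real task would call createBatchesForCompany
  // for each active company.
  exampleTask = cron.schedule("*/5 * * * *", async () => {
    console.log("Checking for pending AI batch requests...");
  });
}

export function stopExampleTask(): void {
  exampleTask?.stop();
  exampleTask = null;
}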

lib/env.ts

@@ -81,6 +81,7 @@ export const env = {
// OpenAI
OPENAI_API_KEY: parseEnvValue(process.env.OPENAI_API_KEY) || "",
OPENAI_MOCK_MODE: parseEnvValue(process.env.OPENAI_MOCK_MODE) === "true",
// Scheduler Configuration
SCHEDULER_ENABLED: parseEnvValue(process.env.SCHEDULER_ENABLED) === "true",
@@ -135,8 +136,14 @@ export function validateEnv(): { valid: boolean; errors: string[] } {
errors.push("NEXTAUTH_SECRET is required");
}
if (!env.OPENAI_API_KEY && env.NODE_ENV === "production") {
errors.push("OPENAI_API_KEY is required in production");
if (
!env.OPENAI_API_KEY &&
env.NODE_ENV === "production" &&
!env.OPENAI_MOCK_MODE
) {
errors.push(
"OPENAI_API_KEY is required in production (unless OPENAI_MOCK_MODE is enabled)"
);
}
return {
@@ -174,6 +181,7 @@ export function logEnvConfig(): void {
console.log(` NODE_ENV: ${env.NODE_ENV}`);
console.log(` NEXTAUTH_URL: ${env.NEXTAUTH_URL}`);
console.log(` SCHEDULER_ENABLED: ${env.SCHEDULER_ENABLED}`);
console.log(` OPENAI_MOCK_MODE: ${env.OPENAI_MOCK_MODE}`);
console.log(` PORT: ${env.PORT}`);
if (env.SCHEDULER_ENABLED) {

lib/hooks/useTRPC.ts (new file, 208 lines)

@@ -0,0 +1,208 @@
/**
* Custom hooks for tRPC usage
*
* This file provides convenient hooks for common tRPC operations
* with proper error handling and loading states.
*/
import { trpc } from "@/lib/trpc-client";
/**
* Hook for dashboard session management
*/
export function useDashboardSessions(filters?: {
search?: string;
sentiment?: string;
category?: string;
startDate?: string;
endDate?: string;
page?: number;
limit?: number;
}) {
return trpc.dashboard.getSessions.useQuery(
{
search: filters?.search,
sentiment: filters?.sentiment as
| "POSITIVE"
| "NEUTRAL"
| "NEGATIVE"
| undefined,
category: filters?.category as
| "SCHEDULE_HOURS"
| "LEAVE_VACATION"
| "SICK_LEAVE_RECOVERY"
| "SALARY_COMPENSATION"
| "CONTRACT_HOURS"
| "ONBOARDING"
| "OFFBOARDING"
| "WORKWEAR_STAFF_PASS"
| "TEAM_CONTACTS"
| "PERSONAL_QUESTIONS"
| "ACCESS_LOGIN"
| "SOCIAL_QUESTIONS"
| "UNRECOGNIZED_OTHER"
| undefined,
startDate: filters?.startDate,
endDate: filters?.endDate,
page: filters?.page || 1,
limit: filters?.limit || 20,
},
{
// Cache for 30 seconds
staleTime: 30 * 1000,
// Keep unused data in cache for 5 minutes
gcTime: 5 * 60 * 1000,
// Refetch when component mounts if data is stale
refetchOnMount: true,
// Don't refetch on window focus to avoid excessive API calls
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for dashboard overview statistics
*/
export function useDashboardOverview(dateRange?: {
startDate?: string;
endDate?: string;
}) {
return trpc.dashboard.getOverview.useQuery(
{
startDate: dateRange?.startDate,
endDate: dateRange?.endDate,
},
{
staleTime: 2 * 60 * 1000, // 2 minutes
gcTime: 10 * 60 * 1000, // 10 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for top questions
*/
export function useTopQuestions(options?: {
limit?: number;
startDate?: string;
endDate?: string;
}) {
return trpc.dashboard.getTopQuestions.useQuery(
{
limit: options?.limit || 10,
startDate: options?.startDate,
endDate: options?.endDate,
},
{
staleTime: 5 * 60 * 1000, // 5 minutes
gcTime: 15 * 60 * 1000, // 15 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for geographic distribution
*/
export function useGeographicDistribution(dateRange?: {
startDate?: string;
endDate?: string;
}) {
return trpc.dashboard.getGeographicDistribution.useQuery(
{
startDate: dateRange?.startDate,
endDate: dateRange?.endDate,
},
{
staleTime: 10 * 60 * 1000, // 10 minutes
gcTime: 30 * 60 * 1000, // 30 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for AI processing metrics
*/
export function useAIMetrics(dateRange?: {
startDate?: string;
endDate?: string;
}) {
return trpc.dashboard.getAIMetrics.useQuery(
{
startDate: dateRange?.startDate,
endDate: dateRange?.endDate,
},
{
staleTime: 2 * 60 * 1000, // 2 minutes
gcTime: 10 * 60 * 1000, // 10 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for user authentication profile
*/
export function useUserProfile() {
return trpc.auth.getProfile.useQuery(undefined, {
staleTime: 5 * 60 * 1000, // 5 minutes
gcTime: 30 * 60 * 1000, // 30 minutes
refetchOnMount: false,
refetchOnWindowFocus: false,
// Only fetch if user is likely authenticated
retry: 1,
});
}
/**
* Hook for admin user management
*/
export function useAdminUsers(options?: {
page?: number;
limit?: number;
search?: string;
}) {
return trpc.admin.getUsers.useQuery(
{
page: options?.page || 1,
limit: options?.limit || 20,
search: options?.search,
},
{
staleTime: 60 * 1000, // 1 minute
gcTime: 5 * 60 * 1000, // 5 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
}
);
}
/**
* Hook for company settings
*/
export function useCompanySettings() {
return trpc.admin.getCompanySettings.useQuery(undefined, {
staleTime: 5 * 60 * 1000, // 5 minutes
gcTime: 30 * 60 * 1000, // 30 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
});
}
/**
* Hook for system statistics
*/
export function useSystemStats() {
return trpc.admin.getSystemStats.useQuery(undefined, {
staleTime: 30 * 1000, // 30 seconds
gcTime: 5 * 60 * 1000, // 5 minutes
refetchOnMount: true,
refetchOnWindowFocus: false,
});
}
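As a usage sketch, a component consuming one of these hooks might look like this; the response shape (data.sessions with id and summary fields) is an assumption about the dashboard router, not confirmed by this diff:

import { useDashboardSessions } from "@/lib/hooks/useTRPC";

export function SessionList() {
  const { data, isLoading, error } = useDashboardSessions({
    sentiment: "NEGATIVE",
    page: 1,
  });

  if (isLoading) return <p>Loading sessions…</p>;
  if (error) return <p>Failed to load sessions: {error.message}</p>;

  return (
    <ul>
      {data?.sessions.map((session) => (
        <li key={session.id}>{session.summary}</li>
      ))}
    </ul>
  );
}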

lib/mocks/openai-mock-server.ts (new file, 416 lines)

@@ -0,0 +1,416 @@
/**
* OpenAI API Mock Server
*
* Provides a drop-in replacement for OpenAI API calls during development
* and testing to prevent unexpected costs and enable offline development.
*/
import {
calculateMockCost,
generateBatchResponse,
generateSessionAnalysisResponse,
MOCK_RESPONSE_GENERATORS,
type MockBatchResponse,
type MockChatCompletion,
type MockResponseType,
} from "./openai-responses";
interface MockOpenAIConfig {
enabled: boolean;
baseDelay: number; // Base delay in ms to simulate API latency
randomDelay: number; // Additional random delay (0 to this value)
errorRate: number; // Probability of simulated errors (0.0 to 1.0)
logRequests: boolean; // Whether to log mock requests
}
class OpenAIMockServer {
private config: MockOpenAIConfig;
private totalCost = 0;
private requestCount = 0;
private activeBatches: Map<string, MockBatchResponse> = new Map();
constructor(config: Partial<MockOpenAIConfig> = {}) {
this.config = {
enabled: process.env.OPENAI_MOCK_MODE === "true",
baseDelay: 500, // 500ms base delay
randomDelay: 1000, // 0-1000ms additional delay
errorRate: 0.02, // 2% error rate
logRequests: process.env.NODE_ENV === "development",
...config,
};
}
/**
* Check if mock mode is enabled
*/
isEnabled(): boolean {
return this.config.enabled;
}
/**
* Simulate network delay
*/
private async simulateDelay(): Promise<void> {
const delay =
this.config.baseDelay + Math.random() * this.config.randomDelay;
await new Promise((resolve) => setTimeout(resolve, delay));
}
/**
* Simulate random API errors
*/
private shouldSimulateError(): boolean {
return Math.random() < this.config.errorRate;
}
/**
* Log mock requests for debugging
*/
private logRequest(endpoint: string, data: any): void {
if (this.config.logRequests) {
console.log(`[OpenAI Mock] ${endpoint}:`, JSON.stringify(data, null, 2));
}
}
/**
* Check if this is a session analysis request (comprehensive JSON format)
*/
private isSessionAnalysisRequest(prompt: string): boolean {
const promptLower = prompt.toLowerCase();
return (
promptLower.includes("session_id") &&
(promptLower.includes("sentiment") ||
promptLower.includes("category") ||
promptLower.includes("language"))
);
}
/**
* Extract processing type from prompt
*/
private extractProcessingType(prompt: string): MockResponseType {
const promptLower = prompt.toLowerCase();
if (
promptLower.includes("sentiment") ||
promptLower.includes("positive") ||
promptLower.includes("negative")
) {
return "sentiment";
}
if (promptLower.includes("category") || promptLower.includes("classify")) {
return "category";
}
if (promptLower.includes("summary") || promptLower.includes("summarize")) {
return "summary";
}
if (promptLower.includes("question") || promptLower.includes("extract")) {
return "questions";
}
// Default to sentiment analysis
return "sentiment";
}
/**
* Mock chat completions endpoint
*/
async mockChatCompletion(request: {
model: string;
messages: Array<{ role: string; content: string }>;
temperature?: number;
max_tokens?: number;
}): Promise<MockChatCompletion> {
this.requestCount++;
await this.simulateDelay();
if (this.shouldSimulateError()) {
throw new Error("Mock OpenAI API error: Rate limit exceeded");
}
this.logRequest("/v1/chat/completions", request);
// Extract the user content to analyze
const userMessage =
request.messages.find((msg) => msg.role === "user")?.content || "";
const systemMessage =
request.messages.find((msg) => msg.role === "system")?.content || "";
let response: MockChatCompletion;
let processingType: string;
// Check if this is a comprehensive session analysis request
if (this.isSessionAnalysisRequest(systemMessage)) {
// Extract session ID from system message for session analysis
const sessionIdMatch = systemMessage.match(/"session_id":\s*"([^"]+)"/);
const sessionId = sessionIdMatch?.[1] || `mock-session-${Date.now()}`;
response = generateSessionAnalysisResponse(userMessage, sessionId);
processingType = "session_analysis";
} else {
// Use simple response generators for other types
const detectedType = this.extractProcessingType(
systemMessage + " " + userMessage
);
response = MOCK_RESPONSE_GENERATORS[detectedType](userMessage);
processingType = detectedType;
}
// Track costs
const cost = calculateMockCost(response.usage);
this.totalCost += cost;
if (this.config.logRequests) {
console.log(
`[OpenAI Mock] Generated ${processingType} response. Cost: $${cost.toFixed(6)}, Total: $${this.totalCost.toFixed(6)}`
);
}
return response;
}
/**
* Mock batch creation endpoint
*/
async mockCreateBatch(request: {
input_file_id: string;
endpoint: string;
completion_window: string;
metadata?: Record<string, string>;
}): Promise<MockBatchResponse> {
await this.simulateDelay();
if (this.shouldSimulateError()) {
throw new Error("Mock OpenAI API error: Invalid file format");
}
this.logRequest("/v1/batches", request);
const batch = generateBatchResponse("validating");
this.activeBatches.set(batch.id, batch);
// Simulate batch processing progression
this.simulateBatchProgression(batch.id);
return batch;
}
/**
* Mock batch retrieval endpoint
*/
async mockGetBatch(batchId: string): Promise<MockBatchResponse> {
await this.simulateDelay();
const batch = this.activeBatches.get(batchId);
if (!batch) {
throw new Error(`Mock OpenAI API error: Batch ${batchId} not found`);
}
this.logRequest(`/v1/batches/${batchId}`, { batchId });
return batch;
}
/**
* Mock file upload endpoint
*/
async mockUploadFile(request: {
file: string; // File content
purpose: string;
}): Promise<{
id: string;
object: string;
purpose: string;
filename: string;
}> {
await this.simulateDelay();
if (this.shouldSimulateError()) {
throw new Error("Mock OpenAI API error: File too large");
}
const fileId = `file-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;
this.logRequest("/v1/files", {
purpose: request.purpose,
size: request.file.length,
});
return {
id: fileId,
object: "file",
purpose: request.purpose,
filename: "batch_input.jsonl",
};
}
/**
* Mock file content retrieval
*/
async mockGetFileContent(fileId: string): Promise<string> {
await this.simulateDelay();
// Find the batch that owns this file
const batch = Array.from(this.activeBatches.values()).find(
(b) => b.output_file_id === fileId
);
if (!batch) {
throw new Error(`Mock OpenAI API error: File ${fileId} not found`);
}
// Generate mock batch results
const results: any[] = [];
for (let i = 0; i < batch.request_counts.total; i++) {
const response = MOCK_RESPONSE_GENERATORS.sentiment(`Sample text ${i}`);
results.push({
id: `batch-req-${i}`,
custom_id: `req-${i}`,
response: {
status_code: 200,
request_id: `req-${Date.now()}-${i}`,
body: response,
},
});
}
return results.map((r) => JSON.stringify(r)).join("\n");
}
/**
* Simulate batch processing progression over time
*/
private simulateBatchProgression(batchId: string): void {
const batch = this.activeBatches.get(batchId);
if (!batch) return;
// Validating -> In Progress (after 30 seconds)
setTimeout(() => {
const currentBatch = this.activeBatches.get(batchId);
if (currentBatch && currentBatch.status === "validating") {
currentBatch.status = "in_progress";
currentBatch.in_progress_at = Math.floor(Date.now() / 1000);
this.activeBatches.set(batchId, currentBatch);
}
}, 30000);
// In Progress -> Finalizing (after 2 minutes)
setTimeout(() => {
const currentBatch = this.activeBatches.get(batchId);
if (currentBatch && currentBatch.status === "in_progress") {
currentBatch.status = "finalizing";
currentBatch.finalizing_at = Math.floor(Date.now() / 1000);
this.activeBatches.set(batchId, currentBatch);
}
}, 120000);
// Finalizing -> Completed (after 3 minutes)
setTimeout(() => {
const currentBatch = this.activeBatches.get(batchId);
if (currentBatch && currentBatch.status === "finalizing") {
currentBatch.status = "completed";
currentBatch.completed_at = Math.floor(Date.now() / 1000);
currentBatch.output_file_id = `file-mock-output-${batchId}`;
currentBatch.request_counts.completed =
currentBatch.request_counts.total;
this.activeBatches.set(batchId, currentBatch);
}
}, 180000);
}
/**
* Get mock statistics
*/
getStats(): {
totalCost: number;
requestCount: number;
activeBatches: number;
isEnabled: boolean;
} {
return {
totalCost: this.totalCost,
requestCount: this.requestCount,
activeBatches: this.activeBatches.size,
isEnabled: this.config.enabled,
};
}
/**
* Reset statistics (useful for tests)
*/
resetStats(): void {
this.totalCost = 0;
this.requestCount = 0;
this.activeBatches.clear();
}
/**
* Update configuration
*/
updateConfig(newConfig: Partial<MockOpenAIConfig>): void {
this.config = { ...this.config, ...newConfig };
}
}
// Global instance
export const openAIMock = new OpenAIMockServer();
/**
* Drop-in replacement for OpenAI client that uses mocks when enabled
*/
export class MockOpenAIClient {
private realClient: any;
constructor(realClient: any) {
this.realClient = realClient;
}
get chat() {
return {
completions: {
create: async (params: any) => {
if (openAIMock.isEnabled()) {
return openAIMock.mockChatCompletion(params);
}
return this.realClient.chat.completions.create(params);
},
},
};
}
get batches() {
return {
create: async (params: any) => {
if (openAIMock.isEnabled()) {
return openAIMock.mockCreateBatch(params);
}
return this.realClient.batches.create(params);
},
retrieve: async (batchId: string) => {
if (openAIMock.isEnabled()) {
return openAIMock.mockGetBatch(batchId);
}
return this.realClient.batches.retrieve(batchId);
},
};
}
get files() {
return {
create: async (params: any) => {
if (openAIMock.isEnabled()) {
return openAIMock.mockUploadFile(params);
}
return this.realClient.files.create(params);
},
content: async (fileId: string) => {
if (openAIMock.isEnabled()) {
return openAIMock.mockGetFileContent(fileId);
}
return this.realClient.files.content(fileId);
},
};
}
}
export default openAIMock;
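A hedged usage sketch for the drop-in client; it assumes the official openai npm package and the "@/lib" path alias, neither of which is confirmed by this diff:

import OpenAI from "openai";
import { MockOpenAIClient } from "@/lib/mocks/openai-mock-server";

async function analyzeTranscript(transcript: string) {
  const real = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
  const client = new MockOpenAIClient(real);

  // Routed to the in-memory mock when OPENAI_MOCK_MODE=true,
  // otherwise forwarded to the real OpenAI API.
  return client.chat.completions.create({
    model: "gpt-4o-mini",
    messages: [{ role: "user", content: transcript }],
  });
}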

lib/mocks/openai-responses.ts (new file, 583 lines)

@@ -0,0 +1,583 @@
/**
* OpenAI API Mock Response Templates
*
* Provides realistic response templates for cost-safe testing
* and development without actual API calls.
*/
export interface MockChatCompletion {
id: string;
object: "chat.completion";
created: number;
model: string;
choices: Array<{
index: number;
message: {
role: "assistant";
content: string;
};
finish_reason: "stop" | "length" | "content_filter";
}>;
usage: {
prompt_tokens: number;
completion_tokens: number;
total_tokens: number;
};
}
export interface MockBatchResponse {
id: string;
object: "batch";
endpoint: string;
errors: {
object: "list";
data: Array<{
code: string;
message: string;
param?: string;
type: string;
}>;
};
input_file_id: string;
completion_window: string;
status:
| "validating"
| "in_progress"
| "finalizing"
| "completed"
| "failed"
| "expired"
| "cancelling"
| "cancelled";
output_file_id?: string;
error_file_id?: string;
created_at: number;
in_progress_at?: number;
expires_at?: number;
finalizing_at?: number;
completed_at?: number;
failed_at?: number;
expired_at?: number;
cancelling_at?: number;
cancelled_at?: number;
request_counts: {
total: number;
completed: number;
failed: number;
};
metadata?: Record<string, string>;
}
/**
* Generate realistic session analysis response matching the expected JSON schema
*/
export function generateSessionAnalysisResponse(
text: string,
sessionId: string
): MockChatCompletion {
// Extract session ID from the text if provided in system prompt
const sessionIdMatch = text.match(/"session_id":\s*"([^"]+)"/);
const extractedSessionId = sessionIdMatch?.[1] || sessionId;
// Simple sentiment analysis logic
const positiveWords = [
"good",
"great",
"excellent",
"happy",
"satisfied",
"wonderful",
"amazing",
"pleased",
"thanks",
];
const negativeWords = [
"bad",
"terrible",
"awful",
"unhappy",
"disappointed",
"frustrated",
"angry",
"upset",
"problem",
];
const words = text.toLowerCase().split(/\s+/);
const positiveCount = words.filter((word) =>
positiveWords.some((pos) => word.includes(pos))
).length;
const negativeCount = words.filter((word) =>
negativeWords.some((neg) => word.includes(neg))
).length;
let sentiment: "POSITIVE" | "NEUTRAL" | "NEGATIVE";
if (positiveCount > negativeCount) {
sentiment = "POSITIVE";
} else if (negativeCount > positiveCount) {
sentiment = "NEGATIVE";
} else {
sentiment = "NEUTRAL";
}
// Simple category classification
const categories: Record<string, string[]> = {
SCHEDULE_HOURS: ["schedule", "hours", "time", "shift", "working", "clock"],
LEAVE_VACATION: [
"vacation",
"leave",
"time off",
"holiday",
"pto",
"days off",
],
SICK_LEAVE_RECOVERY: [
"sick",
"ill",
"medical",
"health",
"doctor",
"recovery",
],
SALARY_COMPENSATION: [
"salary",
"pay",
"compensation",
"money",
"wage",
"payment",
],
CONTRACT_HOURS: ["contract", "agreement", "terms", "conditions"],
ONBOARDING: [
"onboard",
"new",
"start",
"first day",
"welcome",
"orientation",
],
OFFBOARDING: ["leaving", "quit", "resign", "last day", "exit", "farewell"],
WORKWEAR_STAFF_PASS: [
"uniform",
"clothing",
"badge",
"pass",
"equipment",
"workwear",
],
TEAM_CONTACTS: ["contact", "phone", "email", "reach", "team", "colleague"],
PERSONAL_QUESTIONS: ["personal", "family", "life", "private"],
ACCESS_LOGIN: [
"login",
"password",
"access",
"account",
"system",
"username",
],
SOCIAL_QUESTIONS: ["social", "chat", "friendly", "casual", "weather"],
};
const textLower = text.toLowerCase();
let bestCategory: keyof typeof categories | "UNRECOGNIZED_OTHER" =
"UNRECOGNIZED_OTHER";
let maxMatches = 0;
for (const [category, keywords] of Object.entries(categories)) {
const matches = keywords.filter((keyword) =>
textLower.includes(keyword)
).length;
if (matches > maxMatches) {
maxMatches = matches;
bestCategory = category as keyof typeof categories;
}
}
// Extract questions (sentences ending with ?)
const questions = text
.split(/[.!]+/)
.map((s) => s.trim())
.filter((s) => s.endsWith("?"))
.slice(0, 5);
// Generate summary (first sentence or truncated text)
const sentences = text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
let summary = sentences[0]?.trim() || text.substring(0, 100);
if (summary.length > 150) {
summary = summary.substring(0, 147) + "...";
}
if (summary.length < 10) {
summary = "User inquiry regarding company policies";
}
// Detect language (simple heuristic)
const dutchWords = [
"het",
"de",
"een",
"en",
"van",
"is",
"dat",
"te",
"met",
"voor",
];
const germanWords = [
"der",
"die",
"das",
"und",
"ist",
"mit",
"zu",
"auf",
"für",
"von",
];
const dutchCount = dutchWords.filter((word) =>
textLower.includes(word)
).length;
const germanCount = germanWords.filter((word) =>
textLower.includes(word)
).length;
let language = "en"; // default to English
if (dutchCount > 0 && dutchCount >= germanCount) {
language = "nl";
} else if (germanCount > 0) {
language = "de";
}
// Check for escalation indicators
const escalated = /escalate|supervisor|manager|boss|higher up/i.test(text);
const forwardedHr = /hr|human resources|personnel|legal/i.test(text);
const analysisResult = {
language,
sentiment,
escalated,
forwarded_hr: forwardedHr,
category: bestCategory,
questions,
summary,
session_id: extractedSessionId,
};
const jsonContent = JSON.stringify(analysisResult);
const promptTokens = Math.ceil(text.length / 4);
const completionTokens = Math.ceil(jsonContent.length / 4);
return {
id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "gpt-4o-mini",
choices: [
{
index: 0,
message: {
role: "assistant",
content: jsonContent,
},
finish_reason: "stop",
},
],
usage: {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
},
};
}
/**
* Generate realistic category classification response
*/
export function generateCategoryResponse(text: string): MockChatCompletion {
// Simple category classification logic
const categories: Record<string, string[]> = {
SCHEDULE_HOURS: ["schedule", "hours", "time", "shift", "working"],
LEAVE_VACATION: ["vacation", "leave", "time off", "holiday", "pto"],
SICK_LEAVE_RECOVERY: ["sick", "ill", "medical", "health", "doctor"],
SALARY_COMPENSATION: ["salary", "pay", "compensation", "money", "wage"],
CONTRACT_HOURS: ["contract", "agreement", "terms", "conditions"],
ONBOARDING: ["onboard", "new", "start", "first day", "welcome"],
OFFBOARDING: ["leaving", "quit", "resign", "last day", "exit"],
WORKWEAR_STAFF_PASS: ["uniform", "clothing", "badge", "pass", "equipment"],
TEAM_CONTACTS: ["contact", "phone", "email", "reach", "team"],
PERSONAL_QUESTIONS: ["personal", "family", "life", "private"],
ACCESS_LOGIN: ["login", "password", "access", "account", "system"],
SOCIAL_QUESTIONS: ["social", "chat", "friendly", "casual"],
};
const textLower = text.toLowerCase();
let bestCategory = "UNRECOGNIZED_OTHER";
let maxMatches = 0;
for (const [category, keywords] of Object.entries(categories)) {
const matches = keywords.filter((keyword) =>
textLower.includes(keyword)
).length;
if (matches > maxMatches) {
maxMatches = matches;
bestCategory = category;
}
}
const promptTokens = Math.ceil(text.length / 4);
const completionTokens = Math.ceil(bestCategory.length / 4);
return {
id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "gpt-4o-mini",
choices: [
{
index: 0,
message: {
role: "assistant",
content: bestCategory,
},
finish_reason: "stop",
},
],
usage: {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
},
};
}
/**
* Generate realistic summary response
*/
export function generateSummaryResponse(text: string): MockChatCompletion {
// Simple summarization - take first sentence or truncate
const sentences = text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
let summary = sentences[0]?.trim() || text.substring(0, 100);
if (summary.length > 150) {
summary = summary.substring(0, 147) + "...";
}
const promptTokens = Math.ceil(text.length / 4);
const completionTokens = Math.ceil(summary.length / 4);
return {
id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "gpt-4o-mini",
choices: [
{
index: 0,
message: {
role: "assistant",
content: summary,
},
finish_reason: "stop",
},
],
usage: {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
},
};
}
/**
* Generate realistic sentiment analysis response
*/
export function generateSentimentResponse(text: string): MockChatCompletion {
// Simple sentiment analysis logic
const positiveWords = [
"good",
"great",
"excellent",
"happy",
"satisfied",
"wonderful",
"amazing",
"pleased",
"thanks",
];
const negativeWords = [
"bad",
"terrible",
"awful",
"unhappy",
"disappointed",
"frustrated",
"angry",
"upset",
"problem",
];
const words = text.toLowerCase().split(/\s+/);
const positiveCount = words.filter((word) =>
positiveWords.some((pos) => word.includes(pos))
).length;
const negativeCount = words.filter((word) =>
negativeWords.some((neg) => word.includes(neg))
).length;
let sentiment: "POSITIVE" | "NEUTRAL" | "NEGATIVE";
if (positiveCount > negativeCount) {
sentiment = "POSITIVE";
} else if (negativeCount > positiveCount) {
sentiment = "NEGATIVE";
} else {
sentiment = "NEUTRAL";
}
const promptTokens = Math.ceil(text.length / 4);
const completionTokens = Math.ceil(sentiment.length / 4);
return {
id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "gpt-4o-mini",
choices: [
{
index: 0,
message: {
role: "assistant",
content: sentiment,
},
finish_reason: "stop",
},
],
usage: {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
},
};
}
/**
* Generate realistic question extraction response
*/
export function generateQuestionExtractionResponse(
text: string
): MockChatCompletion {
// Extract sentences that end with question marks
const questions = text
.split(/[.!]+/)
.map((s) => s.trim())
.filter((s) => s.endsWith("?"))
.slice(0, 5); // Limit to 5 questions
const result =
questions.length > 0 ? questions.join("\n") : "No questions found.";
const promptTokens = Math.ceil(text.length / 4);
const completionTokens = Math.ceil(result.length / 4);
return {
id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
object: "chat.completion",
created: Math.floor(Date.now() / 1000),
model: "gpt-4o-mini",
choices: [
{
index: 0,
message: {
role: "assistant",
content: result,
},
finish_reason: "stop",
},
],
usage: {
prompt_tokens: promptTokens,
completion_tokens: completionTokens,
total_tokens: promptTokens + completionTokens,
},
};
}
/**
* Generate mock batch job response
*/
export function generateBatchResponse(
status: MockBatchResponse["status"] = "in_progress"
): MockBatchResponse {
const now = Math.floor(Date.now() / 1000);
const batchId = `batch_mock_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;
const result: MockBatchResponse = {
id: batchId,
object: "batch",
endpoint: "/v1/chat/completions",
errors: {
object: "list",
data: [],
},
input_file_id: `file-mock-input-${batchId}`,
completion_window: "24h",
status,
created_at: now - 300, // 5 minutes ago
expires_at: now + 86400, // 24 hours from now
request_counts: {
total: 100,
completed:
status === "completed" ? 100 : status === "in_progress" ? 75 : 0,
failed: status === "failed" ? 25 : 0,
},
metadata: {
company_id: "test-company",
batch_type: "ai_processing",
},
};
// Set optional fields based on status
if (status === "completed") {
result.output_file_id = `file-mock-output-${batchId}`;
result.completed_at = now - 30;
}
if (status === "failed") {
result.failed_at = now - 30;
}
if (status !== "validating") {
result.in_progress_at = now - 240; // 4 minutes ago
}
if (status === "finalizing" || status === "completed") {
result.finalizing_at = now - 60;
}
return result;
}
/**
* Mock cost calculation for testing
*/
export function calculateMockCost(usage: {
prompt_tokens: number;
completion_tokens: number;
}): number {
// Mock pricing: $0.15 per 1K prompt tokens, $0.60 per 1K completion tokens (gpt-4o-mini rates)
const promptCost = (usage.prompt_tokens / 1000) * 0.15;
const completionCost = (usage.completion_tokens / 1000) * 0.6;
return promptCost + completionCost;
}
/**
* Response templates for different AI processing types
*/
export const MOCK_RESPONSE_GENERATORS = {
sentiment: generateSentimentResponse,
category: generateCategoryResponse,
summary: generateSummaryResponse,
questions: generateQuestionExtractionResponse,
} as const;
export type MockResponseType = keyof typeof MOCK_RESPONSE_GENERATORS;
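A small usage sketch for these helpers; the sample text is arbitrary, and the cost comes from the mock rates above, not real billing:

import {
  calculateMockCost,
  MOCK_RESPONSE_GENERATORS,
} from "@/lib/mocks/openai-responses";

const res = MOCK_RESPONSE_GENERATORS.sentiment(
  "Thanks, the new schedule is great!"
);
console.log(res.choices[0].message.content); // "POSITIVE"
console.log(calculateMockCost(res.usage)); // fractions of a cent at gpt-4o-mini rates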

(session processing scheduler file)

@@ -1,15 +1,17 @@
// Enhanced session processing scheduler with AI cost tracking and question management
import {
type AIProcessingRequest,
AIRequestStatus,
ProcessingStage,
type SentimentCategory,
type SessionCategory,
AIRequestStatus,
type AIProcessingRequest,
} from "@prisma/client";
import cron from "node-cron";
import fetch from "node-fetch";
import { withRetry } from "./database-retry";
import { env } from "./env";
import { openAIMock } from "./mocks/openai-mock-server";
import { prisma } from "./prisma";
import {
completeStage,
@@ -330,15 +332,17 @@
}
/**
* Processes a session transcript using OpenAI API
* Processes a session transcript using OpenAI API (real or mock)
*/
async function processTranscriptWithOpenAI(
sessionId: string,
transcript: string,
companyId: string
): Promise<ProcessedData> {
if (!OPENAI_API_KEY) {
throw new Error("OPENAI_API_KEY environment variable is not set");
if (!OPENAI_API_KEY && !env.OPENAI_MOCK_MODE) {
throw new Error(
"OPENAI_API_KEY environment variable is not set (or enable OPENAI_MOCK_MODE for development)"
);
}
// Get company's AI model
@@ -373,37 +377,49 @@
`;
try {
const response = await fetch(OPENAI_API_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${OPENAI_API_KEY}`,
},
body: JSON.stringify({
model: aiModel, // Use company's configured AI model
messages: [
{
role: "system",
content: systemMessage,
},
{
role: "user",
content: transcript,
},
],
temperature: 0.3, // Lower temperature for more consistent results
response_format: { type: "json_object" },
}),
});
let openaiResponse: OpenAIResponse;
if (!response.ok) {
const errorText = await response.text();
throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
const requestParams = {
model: aiModel, // Use company's configured AI model
messages: [
{
role: "system",
content: systemMessage,
},
{
role: "user",
content: transcript,
},
],
temperature: 0.3, // Lower temperature for more consistent results
response_format: { type: "json_object" },
};
if (env.OPENAI_MOCK_MODE) {
// Use mock OpenAI API for cost-safe development/testing
console.log(
`[OpenAI Mock] Processing session ${sessionId} with mock API`
);
openaiResponse = await openAIMock.mockChatCompletion(requestParams);
} else {
// Use real OpenAI API
const response = await fetch(OPENAI_API_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${OPENAI_API_KEY}`,
},
body: JSON.stringify(requestParams),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
}
openaiResponse = (await response.json()) as OpenAIResponse;
}
const openaiResponse: OpenAIResponse =
(await response.json()) as OpenAIResponse;
// Record the AI processing request for cost tracking
await recordAIProcessingRequest(
sessionId,
@@ -825,7 +841,9 @@ export function startProcessingScheduler(): void {
/**
* Create batch requests for sessions needing AI processing
*/
async function createBatchRequestsForSessions(batchSize: number | null = null): Promise<void> {
async function createBatchRequestsForSessions(
batchSize: number | null = null
): Promise<void> {
// Get sessions that need AI processing using the new status system
const sessionsNeedingAI = await getSessionsNeedingProcessing(
ProcessingStage.AI_ANALYSIS,
@@ -903,7 +921,10 @@ async function createBatchRequestsForSessions(batchSize: number | null = null): Promise<void> {
batchRequests.push(processingRequest);
} catch (error) {
console.error(`Failed to create batch request for session ${session.id}:`, error);
console.error(
`Failed to create batch request for session ${session.id}:`,
error
);
await failStage(
session.id,
ProcessingStage.AI_ANALYSIS,

(email utility file)

@@ -68,7 +68,9 @@ export async function sendEmail(
function getEmailConfig(): EmailConfig & { isConfigured: boolean } {
const config = {
smtpHost: process.env.SMTP_HOST,
smtpPort: process.env.SMTP_PORT ? parseInt(process.env.SMTP_PORT) : 587,
smtpPort: process.env.SMTP_PORT
? Number.parseInt(process.env.SMTP_PORT)
: 587,
smtpUser: process.env.SMTP_USER,
smtpPassword: process.env.SMTP_PASSWORD,
fromEmail: process.env.FROM_EMAIL || "noreply@livedash.app",

lib/trpc-client.ts (new file, 100 lines)

@@ -0,0 +1,100 @@
/**
* tRPC Client Configuration
*
* This file sets up the tRPC client for use in React components.
* Provides type-safe API calls with automatic serialization.
*/
import { httpBatchLink } from "@trpc/client";
import { createTRPCNext } from "@trpc/next";
import superjson from "superjson";
import type { AppRouter } from "@/server/routers/_app";
function getBaseUrl() {
if (typeof window !== "undefined") {
// browser should use relative path
return "";
}
if (process.env.VERCEL_URL) {
// reference for vercel.com
return `https://${process.env.VERCEL_URL}`;
}
if (process.env.RENDER_INTERNAL_HOSTNAME) {
// reference for render.com
return `http://${process.env.RENDER_INTERNAL_HOSTNAME}:${process.env.PORT}`;
}
// assume localhost
return `http://localhost:${process.env.PORT ?? 3000}`;
}
/**
* Main tRPC client instance
*/
export const trpc = createTRPCNext<AppRouter>({
config() {
return {
links: [
httpBatchLink({
/**
* If you want to use SSR, you need to use the server's full URL
* @link https://trpc.io/docs/ssr
**/
url: `${getBaseUrl()}/api/trpc`,
/**
* Transformer for data serialization
*/
transformer: superjson,
/**
* Set custom request headers on every request from tRPC
* @link https://trpc.io/docs/v10/header
*/
headers() {
return {
// Include credentials for authentication
credentials: "include",
};
},
}),
],
/**
* Query client configuration
* @link https://trpc.io/docs/v10/react-query-integration
*/
queryClientConfig: {
defaultOptions: {
queries: {
// Stale time of 30 seconds
staleTime: 30 * 1000,
// Cache time of 5 minutes
gcTime: 5 * 60 * 1000,
// Retry failed requests up to 3 times
retry: 3,
// Retry delay that increases exponentially
retryDelay: (attemptIndex) =>
Math.min(1000 * 2 ** attemptIndex, 30000),
},
mutations: {
// Retry mutations once on network errors
retry: 1,
},
},
},
};
},
/**
* Whether tRPC should await queries when server rendering pages
* @link https://trpc.io/docs/nextjs#ssr-boolean-default-false
*/
ssr: false,
transformer: superjson,
});
/**
* Type helper for tRPC router
*/
export type TRPCRouter = typeof trpc;
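To activate this client, the Next.js app component is wrapped with withTRPC — a minimal sketch, assuming a standard pages/_app.tsx (the file location and component name are assumptions):

import type { AppProps } from "next/app";
import { trpc } from "@/lib/trpc-client";

function App({ Component, pageProps }: AppProps) {
  return <Component {...pageProps} />;
}

// createTRPCNext exposes withTRPC, which injects the query client and links.
export default trpc.withTRPC(App);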

lib/trpc.ts (new file, 163 lines)

@@ -0,0 +1,163 @@
/**
* tRPC Server Configuration
*
* This file sets up the core tRPC configuration including:
* - Server context creation with authentication
* - Router initialization
* - Middleware for authentication and error handling
*/
import { initTRPC, TRPCError } from "@trpc/server";
import type { FetchCreateContextFnOptions } from "@trpc/server/adapters/fetch";
import { getServerSession } from "next-auth/next";
import superjson from "superjson";
import type { z } from "zod";
import { authOptions } from "./auth";
import { prisma } from "./prisma";
import { validateInput } from "./validation";
/**
* Create context for tRPC requests
* This runs on every request and provides:
* - Database access
* - User session information
* - Request/response objects
*/
export async function createTRPCContext(opts: FetchCreateContextFnOptions) {
const session = await getServerSession(authOptions);
return {
prisma,
session,
req: opts.req,
};
}
export type Context = Awaited<ReturnType<typeof createTRPCContext>>;
/**
* Initialize tRPC with superjson for date serialization
*/
const t = initTRPC.context<Context>().create({
transformer: superjson,
errorFormatter({ shape }) {
return shape;
},
});
/**
* Base router and middleware exports
*/
export const router = t.router;
export const publicProcedure = t.procedure;
/**
* Authentication middleware
* Throws error if user is not authenticated
*/
const enforceUserIsAuthed = t.middleware(({ ctx, next }) => {
if (!ctx.session?.user?.email) {
throw new TRPCError({ code: "UNAUTHORIZED" });
}
return next({
ctx: {
...ctx,
session: { ...ctx.session, user: ctx.session.user },
},
});
});
/**
* Company access middleware
* Ensures user has access to their company's data
*/
const enforceCompanyAccess = t.middleware(async ({ ctx, next }) => {
if (!ctx.session?.user?.email) {
throw new TRPCError({ code: "UNAUTHORIZED" });
}
const user = await ctx.prisma.user.findUnique({
where: { email: ctx.session.user.email },
include: { company: true },
});
if (!user || !user.company) {
throw new TRPCError({
code: "FORBIDDEN",
message: "User does not have company access",
});
}
return next({
ctx: {
...ctx,
user,
company: user.company,
},
});
});
/**
* Admin access middleware
* Ensures user has admin role
*/
const enforceAdminAccess = t.middleware(async ({ ctx, next }) => {
if (!ctx.session?.user?.email) {
throw new TRPCError({ code: "UNAUTHORIZED" });
}
const user = await ctx.prisma.user.findUnique({
where: { email: ctx.session.user.email },
include: { company: true },
});
if (!user || user.role !== "ADMIN") {
throw new TRPCError({
code: "FORBIDDEN",
message: "Admin access required",
});
}
return next({
ctx: {
...ctx,
user,
company: user.company,
},
});
});
/**
* Input validation middleware
* Automatically validates inputs using Zod schemas
*/
const createValidatedProcedure = <T>(schema: z.ZodSchema<T>) =>
publicProcedure.input(schema).use(({ input, next }) => {
const validation = validateInput(schema, input);
if (!validation.success) {
throw new TRPCError({
code: "BAD_REQUEST",
message: validation.errors.join(", "),
});
}
return next({ ctx: {}, input: validation.data });
});
/**
* Procedure variants for different access levels
*/
export const protectedProcedure = publicProcedure.use(enforceUserIsAuthed);
export const companyProcedure = publicProcedure.use(enforceCompanyAccess);
export const adminProcedure = publicProcedure.use(enforceAdminAccess);
export const validatedProcedure = createValidatedProcedure;
/**
* Rate limiting middleware for sensitive operations
*/
export const rateLimitedProcedure = publicProcedure.use(
async ({ ctx, next }) => {
// Rate limiting logic would go here
// For now, just pass through
return next({ ctx });
}
);
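As a usage sketch, a router composed from these building blocks might look like this; the route names and the prisma session model access are illustrative assumptions, not routes defined in this commit:

import { companyProcedure, publicProcedure, router } from "@/lib/trpc";

export const exampleRouter = router({
  health: publicProcedure.query(() => ({ ok: true })),
  // companyProcedure narrows ctx so user and company are guaranteed present.
  sessionCount: companyProcedure.query(({ ctx }) =>
    ctx.prisma.session.count({ where: { companyId: ctx.company.id } })
  ),
});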