Mirror of https://github.com/kjanat/livedash-node.git, synced 2026-01-16 12:12:09 +01:00
feat: complete tRPC integration and fix platform UI issues
- Implement comprehensive tRPC setup with type-safe API
- Create tRPC routers for dashboard, admin, and auth endpoints
- Migrate frontend components to use tRPC client
- Fix platform dashboard Settings button functionality
- Add platform settings page with profile and security management
- Create OpenAI API mocking infrastructure for cost-safe testing (usage sketch below)
- Update tests to work with new tRPC architecture
- Sync database schema to fix AIBatchRequest table errors
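
Mock mode in the files below is keyed off the OPENAI_MOCK_MODE environment variable. A minimal sketch of forcing it on from a test setup file, using only the APIs introduced in this commit (the relative import path is an assumption for illustration):

import { openAIMock } from "./lib/mocks/openai-mock-server";

// Force mock mode for the test run and make simulated errors deterministic.
openAIMock.updateConfig({ enabled: true, errorRate: 0 });
openAIMock.resetStats();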
lib/mocks/openai-mock-server.ts (new file, 416 lines)
@@ -0,0 +1,416 @@
/**
 * OpenAI API Mock Server
 *
 * Provides a drop-in replacement for OpenAI API calls during development
 * and testing to prevent unexpected costs and enable offline development.
 */

import {
  calculateMockCost,
  generateBatchResponse,
  generateSessionAnalysisResponse,
  MOCK_RESPONSE_GENERATORS,
  type MockBatchResponse,
  type MockChatCompletion,
  type MockResponseType,
} from "./openai-responses";

interface MockOpenAIConfig {
  enabled: boolean;
  baseDelay: number; // Base delay in ms to simulate API latency
  randomDelay: number; // Additional random delay (0 to this value)
  errorRate: number; // Probability of simulated errors (0.0 to 1.0)
  logRequests: boolean; // Whether to log mock requests
}

class OpenAIMockServer {
  private config: MockOpenAIConfig;
  private totalCost = 0;
  private requestCount = 0;
  private activeBatches: Map<string, MockBatchResponse> = new Map();

  constructor(config: Partial<MockOpenAIConfig> = {}) {
    this.config = {
      enabled: process.env.OPENAI_MOCK_MODE === "true",
      baseDelay: 500, // 500ms base delay
      randomDelay: 1000, // 0-1000ms additional delay
      errorRate: 0.02, // 2% error rate
      logRequests: process.env.NODE_ENV === "development",
      ...config,
    };
  }

  /**
   * Check if mock mode is enabled
   */
  isEnabled(): boolean {
    return this.config.enabled;
  }

  /**
   * Simulate network delay
   */
  private async simulateDelay(): Promise<void> {
    const delay =
      this.config.baseDelay + Math.random() * this.config.randomDelay;
    await new Promise((resolve) => setTimeout(resolve, delay));
  }

  /**
   * Simulate random API errors
   */
  private shouldSimulateError(): boolean {
    return Math.random() < this.config.errorRate;
  }

  /**
   * Log mock requests for debugging
   */
  private logRequest(endpoint: string, data: any): void {
    if (this.config.logRequests) {
      console.log(`[OpenAI Mock] ${endpoint}:`, JSON.stringify(data, null, 2));
    }
  }

  /**
   * Check if this is a session analysis request (comprehensive JSON format)
   */
  private isSessionAnalysisRequest(prompt: string): boolean {
    const promptLower = prompt.toLowerCase();
    return (
      promptLower.includes("session_id") &&
      (promptLower.includes("sentiment") ||
        promptLower.includes("category") ||
        promptLower.includes("language"))
    );
  }

  /**
   * Extract processing type from prompt
   */
  private extractProcessingType(prompt: string): MockResponseType {
    const promptLower = prompt.toLowerCase();

    if (
      promptLower.includes("sentiment") ||
      promptLower.includes("positive") ||
      promptLower.includes("negative")
    ) {
      return "sentiment";
    }
    if (promptLower.includes("category") || promptLower.includes("classify")) {
      return "category";
    }
    if (promptLower.includes("summary") || promptLower.includes("summarize")) {
      return "summary";
    }
    if (promptLower.includes("question") || promptLower.includes("extract")) {
      return "questions";
    }

    // Default to sentiment analysis
    return "sentiment";
  }

  /**
   * Mock chat completions endpoint
   */
  async mockChatCompletion(request: {
    model: string;
    messages: Array<{ role: string; content: string }>;
    temperature?: number;
    max_tokens?: number;
  }): Promise<MockChatCompletion> {
    this.requestCount++;

    await this.simulateDelay();

    if (this.shouldSimulateError()) {
      throw new Error("Mock OpenAI API error: Rate limit exceeded");
    }

    this.logRequest("/v1/chat/completions", request);

    // Extract the user content to analyze
    const userMessage =
      request.messages.find((msg) => msg.role === "user")?.content || "";
    const systemMessage =
      request.messages.find((msg) => msg.role === "system")?.content || "";

    let response: MockChatCompletion;
    let processingType: string;

    // Check if this is a comprehensive session analysis request
    if (this.isSessionAnalysisRequest(systemMessage)) {
      // Extract session ID from system message for session analysis
      const sessionIdMatch = systemMessage.match(/"session_id":\s*"([^"]+)"/);
      const sessionId = sessionIdMatch?.[1] || `mock-session-${Date.now()}`;
      response = generateSessionAnalysisResponse(userMessage, sessionId);
      processingType = "session_analysis";
    } else {
      // Use simple response generators for other types
      const detectedType = this.extractProcessingType(
        systemMessage + " " + userMessage
      );
      response = MOCK_RESPONSE_GENERATORS[detectedType](userMessage);
      processingType = detectedType;
    }

    // Track costs
    const cost = calculateMockCost(response.usage);
    this.totalCost += cost;

    if (this.config.logRequests) {
      console.log(
        `[OpenAI Mock] Generated ${processingType} response. Cost: $${cost.toFixed(6)}, Total: $${this.totalCost.toFixed(6)}`
      );
    }

    return response;
  }
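
  // Routing note: prompts whose system message contains "session_id" together
  // with "sentiment", "category", or "language" are answered by the
  // comprehensive session-analysis generator above; all other prompts fall
  // through to the single-purpose generators selected by extractProcessingType().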

  /**
   * Mock batch creation endpoint
   */
  async mockCreateBatch(request: {
    input_file_id: string;
    endpoint: string;
    completion_window: string;
    metadata?: Record<string, string>;
  }): Promise<MockBatchResponse> {
    await this.simulateDelay();

    if (this.shouldSimulateError()) {
      throw new Error("Mock OpenAI API error: Invalid file format");
    }

    this.logRequest("/v1/batches", request);

    const batch = generateBatchResponse("validating");
    this.activeBatches.set(batch.id, batch);

    // Simulate batch processing progression
    this.simulateBatchProgression(batch.id);

    return batch;
  }

  /**
   * Mock batch retrieval endpoint
   */
  async mockGetBatch(batchId: string): Promise<MockBatchResponse> {
    await this.simulateDelay();

    const batch = this.activeBatches.get(batchId);
    if (!batch) {
      throw new Error(`Mock OpenAI API error: Batch ${batchId} not found`);
    }

    this.logRequest(`/v1/batches/${batchId}`, { batchId });

    return batch;
  }

  /**
   * Mock file upload endpoint
   */
  async mockUploadFile(request: {
    file: string; // File content
    purpose: string;
  }): Promise<{
    id: string;
    object: string;
    purpose: string;
    filename: string;
  }> {
    await this.simulateDelay();

    if (this.shouldSimulateError()) {
      throw new Error("Mock OpenAI API error: File too large");
    }

    const fileId = `file-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`;

    this.logRequest("/v1/files", {
      purpose: request.purpose,
      size: request.file.length,
    });

    return {
      id: fileId,
      object: "file",
      purpose: request.purpose,
      filename: "batch_input.jsonl",
    };
  }

  /**
   * Mock file content retrieval
   */
  async mockGetFileContent(fileId: string): Promise<string> {
    await this.simulateDelay();

    // Find the batch that owns this file
    const batch = Array.from(this.activeBatches.values()).find(
      (b) => b.output_file_id === fileId
    );

    if (!batch) {
      throw new Error(`Mock OpenAI API error: File ${fileId} not found`);
    }

    // Generate mock batch results as JSONL (one JSON object per line)
    const results: Array<{
      id: string;
      custom_id: string;
      response: {
        status_code: number;
        request_id: string;
        body: MockChatCompletion;
      };
    }> = [];
    for (let i = 0; i < batch.request_counts.total; i++) {
      const response = MOCK_RESPONSE_GENERATORS.sentiment(`Sample text ${i}`);
      results.push({
        id: `batch-req-${i}`,
        custom_id: `req-${i}`,
        response: {
          status_code: 200,
          request_id: `req-${Date.now()}-${i}`,
          body: response,
        },
      });
    }

    return results.map((r) => JSON.stringify(r)).join("\n");
  }

  /**
   * Simulate batch processing progression over time
   */
  private simulateBatchProgression(batchId: string): void {
    const batch = this.activeBatches.get(batchId);
    if (!batch) return;

    // Validating -> In Progress (after 30 seconds)
    setTimeout(() => {
      const currentBatch = this.activeBatches.get(batchId);
      if (currentBatch && currentBatch.status === "validating") {
        currentBatch.status = "in_progress";
        currentBatch.in_progress_at = Math.floor(Date.now() / 1000);
        this.activeBatches.set(batchId, currentBatch);
      }
    }, 30000);

    // In Progress -> Finalizing (after 2 minutes)
    setTimeout(() => {
      const currentBatch = this.activeBatches.get(batchId);
      if (currentBatch && currentBatch.status === "in_progress") {
        currentBatch.status = "finalizing";
        currentBatch.finalizing_at = Math.floor(Date.now() / 1000);
        this.activeBatches.set(batchId, currentBatch);
      }
    }, 120000);

    // Finalizing -> Completed (after 3 minutes)
    setTimeout(() => {
      const currentBatch = this.activeBatches.get(batchId);
      if (currentBatch && currentBatch.status === "finalizing") {
        currentBatch.status = "completed";
        currentBatch.completed_at = Math.floor(Date.now() / 1000);
        currentBatch.output_file_id = `file-mock-output-${batchId}`;
        currentBatch.request_counts.completed =
          currentBatch.request_counts.total;
        this.activeBatches.set(batchId, currentBatch);
      }
    }, 180000);
  }

  /**
   * Get mock statistics
   */
  getStats(): {
    totalCost: number;
    requestCount: number;
    activeBatches: number;
    isEnabled: boolean;
  } {
    return {
      totalCost: this.totalCost,
      requestCount: this.requestCount,
      activeBatches: this.activeBatches.size,
      isEnabled: this.config.enabled,
    };
  }

  /**
   * Reset statistics (useful for tests)
   */
  resetStats(): void {
    this.totalCost = 0;
    this.requestCount = 0;
    this.activeBatches.clear();
  }

  /**
   * Update configuration
   */
  updateConfig(newConfig: Partial<MockOpenAIConfig>): void {
    this.config = { ...this.config, ...newConfig };
  }
}

// Global instance
export const openAIMock = new OpenAIMockServer();

/**
 * Drop-in replacement for OpenAI client that uses mocks when enabled
 */
export class MockOpenAIClient {
  private realClient: any;

  constructor(realClient: any) {
    this.realClient = realClient;
  }

  get chat() {
    return {
      completions: {
        create: async (params: any) => {
          if (openAIMock.isEnabled()) {
            return openAIMock.mockChatCompletion(params);
          }
          return this.realClient.chat.completions.create(params);
        },
      },
    };
  }

  get batches() {
    return {
      create: async (params: any) => {
        if (openAIMock.isEnabled()) {
          return openAIMock.mockCreateBatch(params);
        }
        return this.realClient.batches.create(params);
      },
      retrieve: async (batchId: string) => {
        if (openAIMock.isEnabled()) {
          return openAIMock.mockGetBatch(batchId);
        }
        return this.realClient.batches.retrieve(batchId);
      },
    };
  }

  get files() {
    return {
      create: async (params: any) => {
        if (openAIMock.isEnabled()) {
          return openAIMock.mockUploadFile(params);
        }
        return this.realClient.files.create(params);
      },
      content: async (fileId: string) => {
        if (openAIMock.isEnabled()) {
          return openAIMock.mockGetFileContent(fileId);
        }
        return this.realClient.files.content(fileId);
      },
    };
  }
}

export default openAIMock;
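
A minimal sketch of how the wrapper above might be used. The `openai` package import and the relative path are assumptions for illustration; calls are routed to the mock only while mock mode is enabled.

import OpenAI from "openai";
import { MockOpenAIClient } from "./lib/mocks/openai-mock-server";

const client = new MockOpenAIClient(
  new OpenAI({ apiKey: process.env.OPENAI_API_KEY })
);

// Served by OpenAIMockServer when OPENAI_MOCK_MODE=true, by the real API otherwise.
const completion = await client.chat.completions.create({
  model: "gpt-4o-mini",
  messages: [{ role: "user", content: "Summarize this session." }],
});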

lib/mocks/openai-responses.ts (new file, 583 lines)
@@ -0,0 +1,583 @@
/**
 * OpenAI API Mock Response Templates
 *
 * Provides realistic response templates for cost-safe testing
 * and development without actual API calls.
 */

export interface MockChatCompletion {
  id: string;
  object: "chat.completion";
  created: number;
  model: string;
  choices: Array<{
    index: number;
    message: {
      role: "assistant";
      content: string;
    };
    finish_reason: "stop" | "length" | "content_filter";
  }>;
  usage: {
    prompt_tokens: number;
    completion_tokens: number;
    total_tokens: number;
  };
}

export interface MockBatchResponse {
  id: string;
  object: "batch";
  endpoint: string;
  errors: {
    object: "list";
    data: Array<{
      code: string;
      message: string;
      param?: string;
      type: string;
    }>;
  };
  input_file_id: string;
  completion_window: string;
  status:
    | "validating"
    | "in_progress"
    | "finalizing"
    | "completed"
    | "failed"
    | "expired"
    | "cancelling"
    | "cancelled";
  output_file_id?: string;
  error_file_id?: string;
  created_at: number;
  in_progress_at?: number;
  expires_at?: number;
  finalizing_at?: number;
  completed_at?: number;
  failed_at?: number;
  expired_at?: number;
  cancelling_at?: number;
  cancelled_at?: number;
  request_counts: {
    total: number;
    completed: number;
    failed: number;
  };
  metadata?: Record<string, string>;
}

/**
 * Generate realistic session analysis response matching the expected JSON schema
 */
export function generateSessionAnalysisResponse(
  text: string,
  sessionId: string
): MockChatCompletion {
  // Extract session ID from the text if provided in system prompt
  const sessionIdMatch = text.match(/"session_id":\s*"([^"]+)"/);
  const extractedSessionId = sessionIdMatch?.[1] || sessionId;

  // Simple sentiment analysis logic
  const positiveWords = [
    "good",
    "great",
    "excellent",
    "happy",
    "satisfied",
    "wonderful",
    "amazing",
    "pleased",
    "thanks",
  ];
  const negativeWords = [
    "bad",
    "terrible",
    "awful",
    "unhappy",
    "disappointed",
    "frustrated",
    "angry",
    "upset",
    "problem",
  ];

  const words = text.toLowerCase().split(/\s+/);
  const positiveCount = words.filter((word) =>
    positiveWords.some((pos) => word.includes(pos))
  ).length;
  const negativeCount = words.filter((word) =>
    negativeWords.some((neg) => word.includes(neg))
  ).length;

  let sentiment: "POSITIVE" | "NEUTRAL" | "NEGATIVE";
  if (positiveCount > negativeCount) {
    sentiment = "POSITIVE";
  } else if (negativeCount > positiveCount) {
    sentiment = "NEGATIVE";
  } else {
    sentiment = "NEUTRAL";
  }

  // Simple category classification
  const categories: Record<string, string[]> = {
    SCHEDULE_HOURS: ["schedule", "hours", "time", "shift", "working", "clock"],
    LEAVE_VACATION: [
      "vacation",
      "leave",
      "time off",
      "holiday",
      "pto",
      "days off",
    ],
    SICK_LEAVE_RECOVERY: [
      "sick",
      "ill",
      "medical",
      "health",
      "doctor",
      "recovery",
    ],
    SALARY_COMPENSATION: [
      "salary",
      "pay",
      "compensation",
      "money",
      "wage",
      "payment",
    ],
    CONTRACT_HOURS: ["contract", "agreement", "terms", "conditions"],
    ONBOARDING: [
      "onboard",
      "new",
      "start",
      "first day",
      "welcome",
      "orientation",
    ],
    OFFBOARDING: ["leaving", "quit", "resign", "last day", "exit", "farewell"],
    WORKWEAR_STAFF_PASS: [
      "uniform",
      "clothing",
      "badge",
      "pass",
      "equipment",
      "workwear",
    ],
    TEAM_CONTACTS: ["contact", "phone", "email", "reach", "team", "colleague"],
    PERSONAL_QUESTIONS: ["personal", "family", "life", "private"],
    ACCESS_LOGIN: [
      "login",
      "password",
      "access",
      "account",
      "system",
      "username",
    ],
    SOCIAL_QUESTIONS: ["social", "chat", "friendly", "casual", "weather"],
  };

  const textLower = text.toLowerCase();
  let bestCategory: keyof typeof categories | "UNRECOGNIZED_OTHER" =
    "UNRECOGNIZED_OTHER";
  let maxMatches = 0;

  for (const [category, keywords] of Object.entries(categories)) {
    const matches = keywords.filter((keyword) =>
      textLower.includes(keyword)
    ).length;
    if (matches > maxMatches) {
      maxMatches = matches;
      bestCategory = category as keyof typeof categories;
    }
  }

  // Extract questions (sentences ending with ?)
  const questions = text
    .split(/[.!]+/)
    .map((s) => s.trim())
    .filter((s) => s.endsWith("?"))
    .slice(0, 5);

  // Generate summary (first sentence or truncated text)
  const sentences = text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
  let summary = sentences[0]?.trim() || text.substring(0, 100);
  if (summary.length > 150) {
    summary = summary.substring(0, 147) + "...";
  }
  if (summary.length < 10) {
    summary = "User inquiry regarding company policies";
  }

  // Detect language (simple heuristic)
  const dutchWords = [
    "het",
    "de",
    "een",
    "en",
    "van",
    "is",
    "dat",
    "te",
    "met",
    "voor",
  ];
  const germanWords = [
    "der",
    "die",
    "das",
    "und",
    "ist",
    "mit",
    "zu",
    "auf",
    "für",
    "von",
  ];
  const dutchCount = dutchWords.filter((word) =>
    textLower.includes(word)
  ).length;
  const germanCount = germanWords.filter((word) =>
    textLower.includes(word)
  ).length;

  let language = "en"; // default to English
  if (dutchCount > 0 && dutchCount >= germanCount) {
    language = "nl";
  } else if (germanCount > 0) {
    language = "de";
  }

  // Check for escalation indicators
  const escalated = /escalate|supervisor|manager|boss|higher up/i.test(text);
  const forwardedHr = /hr|human resources|personnel|legal/i.test(text);

  const analysisResult = {
    language,
    sentiment,
    escalated,
    forwarded_hr: forwardedHr,
    category: bestCategory,
    questions,
    summary,
    session_id: extractedSessionId,
  };

  const jsonContent = JSON.stringify(analysisResult);
  const promptTokens = Math.ceil(text.length / 4);
  const completionTokens = Math.ceil(jsonContent.length / 4);

  return {
    id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: "gpt-4o-mini",
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: jsonContent,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    },
  };
}
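
// Worked example: generateSessionAnalysisResponse("Can I take Friday off?", "mock-session-123")
// produces a completion whose choices[0].message.content is:
// {"language":"en","sentiment":"NEUTRAL","escalated":false,"forwarded_hr":false,
//  "category":"UNRECOGNIZED_OTHER","questions":["Can I take Friday off?"],
//  "summary":"Can I take Friday off","session_id":"mock-session-123"}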

/**
 * Generate realistic category classification response
 */
export function generateCategoryResponse(text: string): MockChatCompletion {
  // Simple category classification logic
  const categories: Record<string, string[]> = {
    SCHEDULE_HOURS: ["schedule", "hours", "time", "shift", "working"],
    LEAVE_VACATION: ["vacation", "leave", "time off", "holiday", "pto"],
    SICK_LEAVE_RECOVERY: ["sick", "ill", "medical", "health", "doctor"],
    SALARY_COMPENSATION: ["salary", "pay", "compensation", "money", "wage"],
    CONTRACT_HOURS: ["contract", "agreement", "terms", "conditions"],
    ONBOARDING: ["onboard", "new", "start", "first day", "welcome"],
    OFFBOARDING: ["leaving", "quit", "resign", "last day", "exit"],
    WORKWEAR_STAFF_PASS: ["uniform", "clothing", "badge", "pass", "equipment"],
    TEAM_CONTACTS: ["contact", "phone", "email", "reach", "team"],
    PERSONAL_QUESTIONS: ["personal", "family", "life", "private"],
    ACCESS_LOGIN: ["login", "password", "access", "account", "system"],
    SOCIAL_QUESTIONS: ["social", "chat", "friendly", "casual"],
  };

  const textLower = text.toLowerCase();
  let bestCategory = "UNRECOGNIZED_OTHER";
  let maxMatches = 0;

  for (const [category, keywords] of Object.entries(categories)) {
    const matches = keywords.filter((keyword) =>
      textLower.includes(keyword)
    ).length;
    if (matches > maxMatches) {
      maxMatches = matches;
      bestCategory = category;
    }
  }

  const promptTokens = Math.ceil(text.length / 4);
  // Math.ceil keeps the token count integral, consistent with the other generators.
  const completionTokens = Math.ceil(bestCategory.length / 4);

  return {
    id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: "gpt-4o-mini",
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: bestCategory,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    },
  };
}

/**
 * Generate realistic summary response
 */
export function generateSummaryResponse(text: string): MockChatCompletion {
  // Simple summarization - take first sentence or truncate
  const sentences = text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
  let summary = sentences[0]?.trim() || text.substring(0, 100);

  if (summary.length > 150) {
    summary = summary.substring(0, 147) + "...";
  }

  const promptTokens = Math.ceil(text.length / 4);
  const completionTokens = Math.ceil(summary.length / 4);

  return {
    id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: "gpt-4o-mini",
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: summary,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    },
  };
}

/**
 * Generate realistic sentiment analysis response
 */
export function generateSentimentResponse(text: string): MockChatCompletion {
  // Simple sentiment analysis logic
  const positiveWords = [
    "good",
    "great",
    "excellent",
    "happy",
    "satisfied",
    "wonderful",
    "amazing",
    "pleased",
    "thanks",
  ];
  const negativeWords = [
    "bad",
    "terrible",
    "awful",
    "unhappy",
    "disappointed",
    "frustrated",
    "angry",
    "upset",
    "problem",
  ];

  const words = text.toLowerCase().split(/\s+/);
  const positiveCount = words.filter((word) =>
    positiveWords.some((pos) => word.includes(pos))
  ).length;
  const negativeCount = words.filter((word) =>
    negativeWords.some((neg) => word.includes(neg))
  ).length;

  let sentiment: "POSITIVE" | "NEUTRAL" | "NEGATIVE";
  if (positiveCount > negativeCount) {
    sentiment = "POSITIVE";
  } else if (negativeCount > positiveCount) {
    sentiment = "NEGATIVE";
  } else {
    sentiment = "NEUTRAL";
  }

  const promptTokens = Math.ceil(text.length / 4);
  const completionTokens = Math.ceil(sentiment.length / 4);

  return {
    id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: "gpt-4o-mini",
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: sentiment,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    },
  };
}

/**
 * Generate realistic question extraction response
 */
export function generateQuestionExtractionResponse(
  text: string
): MockChatCompletion {
  // Extract sentences that end with question marks
  const questions = text
    .split(/[.!]+/)
    .map((s) => s.trim())
    .filter((s) => s.endsWith("?"))
    .slice(0, 5); // Limit to 5 questions

  const result =
    questions.length > 0 ? questions.join("\n") : "No questions found.";

  const promptTokens = Math.ceil(text.length / 4);
  const completionTokens = Math.ceil(result.length / 4);

  return {
    id: `chatcmpl-mock-${Date.now()}-${Math.random().toString(36).substr(2, 9)}`,
    object: "chat.completion",
    created: Math.floor(Date.now() / 1000),
    model: "gpt-4o-mini",
    choices: [
      {
        index: 0,
        message: {
          role: "assistant",
          content: result,
        },
        finish_reason: "stop",
      },
    ],
    usage: {
      prompt_tokens: promptTokens,
      completion_tokens: completionTokens,
      total_tokens: promptTokens + completionTokens,
    },
  };
}

/**
 * Generate mock batch job response
 */
export function generateBatchResponse(
  status: MockBatchResponse["status"] = "in_progress"
): MockBatchResponse {
  const now = Math.floor(Date.now() / 1000);
  const batchId = `batch_mock_${Date.now()}_${Math.random().toString(36).substr(2, 9)}`;

  const result: MockBatchResponse = {
    id: batchId,
    object: "batch",
    endpoint: "/v1/chat/completions",
    errors: {
      object: "list",
      data: [],
    },
    input_file_id: `file-mock-input-${batchId}`,
    completion_window: "24h",
    status,
    created_at: now - 300, // 5 minutes ago
    expires_at: now + 86400, // 24 hours from now
    request_counts: {
      total: 100,
      completed:
        status === "completed" ? 100 : status === "in_progress" ? 75 : 0,
      failed: status === "failed" ? 25 : 0,
    },
    metadata: {
      company_id: "test-company",
      batch_type: "ai_processing",
    },
  };

  // Set optional fields based on status
  if (status === "completed") {
    result.output_file_id = `file-mock-output-${batchId}`;
    result.completed_at = now - 30;
  }

  if (status === "failed") {
    result.failed_at = now - 30;
  }

  if (status !== "validating") {
    result.in_progress_at = now - 240; // 4 minutes ago
  }

  if (status === "finalizing" || status === "completed") {
    result.finalizing_at = now - 60;
  }

  return result;
}

/**
 * Mock cost calculation for testing
 */
export function calculateMockCost(usage: {
  prompt_tokens: number;
  completion_tokens: number;
}): number {
  // Mock pricing: $0.15 per 1K prompt tokens, $0.60 per 1K completion tokens.
  // (These mirror gpt-4o-mini's published rates, which are actually per 1M
  // tokens; applying them per 1K overstates cost, which is harmless in a mock.)
  const promptCost = (usage.prompt_tokens / 1000) * 0.15;
  const completionCost = (usage.completion_tokens / 1000) * 0.6;
  return promptCost + completionCost;
}
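
// Worked example (hypothetical token counts): for 1,000 prompt tokens and
// 200 completion tokens, calculateMockCost returns
//   (1000 / 1000) * 0.15 + (200 / 1000) * 0.6 = 0.15 + 0.12 = 0.27 (USD).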

/**
 * Response templates for different AI processing types
 */
export const MOCK_RESPONSE_GENERATORS = {
  sentiment: generateSentimentResponse,
  category: generateCategoryResponse,
  summary: generateSummaryResponse,
  questions: generateQuestionExtractionResponse,
} as const;

export type MockResponseType = keyof typeof MOCK_RESPONSE_GENERATORS;
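
A brief sketch of driving the generators directly, e.g. from a unit test (sample text is illustrative; imports from lib/mocks/openai-responses omitted):

const completion = MOCK_RESPONSE_GENERATORS.sentiment(
  "Thanks, the new schedule works great!"
);
console.log(completion.choices[0].message.content); // "POSITIVE"
console.log(calculateMockCost(completion.usage)); // small mock cost in USD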