refactor: fix biome linting issues and update project documentation

- Fix 36+ Biome linting issues, reducing errors/warnings from 227 to 191
- Replace explicit 'any' types with proper TypeScript interfaces
- Fix React hooks dependencies and useCallback patterns
- Resolve unused variables and parameter assignment issues
- Improve accessibility with proper label associations
- Add comprehensive API documentation for admin and security features
- Update README.md with accurate PostgreSQL setup and current tech stack
- Create complete documentation for audit logging, CSP monitoring, and batch processing
- Fix outdated project information and document missing developer workflows
commit 1eea2cc3e4 (parent 3e9e75e854)
committed by Kaj Kowalski, 2025-07-11 21:50:53 +02:00
121 changed files with 28687 additions and 4895 deletions

lib/auditLogRetention.ts (new file, 513 lines)

@@ -0,0 +1,513 @@
import { prisma } from "./prisma";
import {
AuditOutcome,
createAuditMetadata,
SecurityEventType,
securityAuditLogger,
} from "./securityAuditLogger";
export interface RetentionPolicy {
name: string;
maxAgeDays: number;
severityFilter?: string[];
eventTypeFilter?: string[];
archiveBeforeDelete?: boolean;
}
export const DEFAULT_RETENTION_POLICIES: RetentionPolicy[] = [
{
name: "Critical Events",
maxAgeDays: 2555, // 7 years for critical security events
severityFilter: ["CRITICAL"],
archiveBeforeDelete: true,
},
{
name: "High Severity Events",
maxAgeDays: 1095, // 3 years for high severity events
severityFilter: ["HIGH"],
archiveBeforeDelete: true,
},
{
name: "Authentication Events",
maxAgeDays: 730, // 2 years for authentication events
eventTypeFilter: ["AUTHENTICATION", "AUTHORIZATION", "PASSWORD_RESET"],
archiveBeforeDelete: true,
},
{
name: "Platform Admin Events",
maxAgeDays: 1095, // 3 years for platform admin activities
eventTypeFilter: ["PLATFORM_ADMIN", "COMPANY_MANAGEMENT"],
archiveBeforeDelete: true,
},
{
name: "User Management Events",
maxAgeDays: 730, // 2 years for user management
eventTypeFilter: ["USER_MANAGEMENT"],
archiveBeforeDelete: true,
},
{
name: "General Events",
maxAgeDays: 365, // 1 year for general events
severityFilter: ["INFO", "LOW", "MEDIUM"],
archiveBeforeDelete: false,
},
];
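// Illustrative only (not part of this commit): callers may pass their own
// policies to the manager defined below instead of these defaults, e.g.
//
//   const policies: RetentionPolicy[] = [
//     { name: "Verbose Events", maxAgeDays: 90, severityFilter: ["INFO"] },
//   ];
//   const manager = new AuditLogRetentionManager(policies);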
export class AuditLogRetentionManager {
private policies: RetentionPolicy[];
private isDryRun: boolean;
constructor(
policies: RetentionPolicy[] = DEFAULT_RETENTION_POLICIES,
isDryRun = false
) {
this.policies = policies;
this.isDryRun = isDryRun;
}
async executeRetentionPolicies(): Promise<{
totalProcessed: number;
totalDeleted: number;
totalArchived: number;
policyResults: Array<{
policyName: string;
processed: number;
deleted: number;
archived: number;
errors: string[];
}>;
}> {
const results = {
totalProcessed: 0,
totalDeleted: 0,
totalArchived: 0,
policyResults: [] as Array<{
policyName: string;
processed: number;
deleted: number;
archived: number;
errors: string[];
}>,
};
// Log retention policy execution start
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: this.isDryRun
? "audit_log_retention_dry_run_started"
: "audit_log_retention_started",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
policiesCount: this.policies.length,
isDryRun: this.isDryRun,
policies: this.policies.map((p) => ({
name: p.name,
maxAgeDays: p.maxAgeDays,
hasArchive: p.archiveBeforeDelete,
})),
}),
},
});
for (const policy of this.policies) {
const policyResult = {
policyName: policy.name,
processed: 0,
deleted: 0,
archived: 0,
errors: [] as string[],
};
try {
const cutoffDate = new Date();
cutoffDate.setDate(cutoffDate.getDate() - policy.maxAgeDays);
// Build where clause based on policy filters
const whereClause: any = {
timestamp: { lt: cutoffDate },
};
if (policy.severityFilter && policy.severityFilter.length > 0) {
whereClause.severity = { in: policy.severityFilter };
}
if (policy.eventTypeFilter && policy.eventTypeFilter.length > 0) {
whereClause.eventType = { in: policy.eventTypeFilter };
}
// Count logs to be processed
const logsToProcess = await prisma.securityAuditLog.count({
where: whereClause,
});
policyResult.processed = logsToProcess;
if (logsToProcess === 0) {
console.log(
`Policy "${policy.name}": No logs found for retention processing`
);
continue;
}
console.log(
`Policy "${policy.name}": Processing ${logsToProcess} logs older than ${policy.maxAgeDays} days`
);
if (this.isDryRun) {
console.log(
`DRY RUN: Would process ${logsToProcess} logs for policy "${policy.name}"`
);
if (policy.archiveBeforeDelete) {
policyResult.archived = logsToProcess;
} else {
policyResult.deleted = logsToProcess;
}
} else {
if (policy.archiveBeforeDelete) {
// In a real implementation, you would export/archive these logs
// For now, we'll just log the archival action
await securityAuditLogger.log({
eventType: SecurityEventType.DATA_PRIVACY,
action: "audit_logs_archived",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
policyName: policy.name,
logsArchived: logsToProcess,
cutoffDate: cutoffDate.toISOString(),
}),
},
});
policyResult.archived = logsToProcess;
console.log(
`Policy "${policy.name}": Archived ${logsToProcess} logs`
);
}
// Delete the logs
const deleteResult = await prisma.securityAuditLog.deleteMany({
where: whereClause,
});
policyResult.deleted = deleteResult.count;
console.log(
`Policy "${policy.name}": Deleted ${deleteResult.count} logs`
);
// Log deletion action
await securityAuditLogger.log({
eventType: SecurityEventType.DATA_PRIVACY,
action: "audit_logs_deleted",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
policyName: policy.name,
logsDeleted: deleteResult.count,
cutoffDate: cutoffDate.toISOString(),
wasArchived: policy.archiveBeforeDelete,
}),
},
});
}
} catch (error) {
const errorMessage = `Error processing policy "${policy.name}": ${error}`;
policyResult.errors.push(errorMessage);
console.error(errorMessage);
// Log retention policy error
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "audit_log_retention_policy_error",
outcome: AuditOutcome.FAILURE,
errorMessage: errorMessage,
context: {
metadata: createAuditMetadata({
policyName: policy.name,
error: "retention_policy_error",
}),
},
});
}
results.policyResults.push(policyResult);
results.totalProcessed += policyResult.processed;
results.totalDeleted += policyResult.deleted;
results.totalArchived += policyResult.archived;
}
// Log retention policy execution completion
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: this.isDryRun
? "audit_log_retention_dry_run_completed"
: "audit_log_retention_completed",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
totalProcessed: results.totalProcessed,
totalDeleted: results.totalDeleted,
totalArchived: results.totalArchived,
policiesExecuted: this.policies.length,
isDryRun: this.isDryRun,
results: results.policyResults,
}),
},
});
return results;
}
async getRetentionStatistics(): Promise<{
totalLogs: number;
logsByEventType: Record<string, number>;
logsBySeverity: Record<string, number>;
logsByAge: Array<{ age: string; count: number }>;
oldestLog?: Date;
newestLog?: Date;
}> {
const [totalLogs, logsByEventType, logsBySeverity, oldestLog, newestLog] =
await Promise.all([
// Total count
prisma.securityAuditLog.count(),
// Group by event type
prisma.securityAuditLog.groupBy({
by: ["eventType"],
_count: { id: true },
}),
// Group by severity
prisma.securityAuditLog.groupBy({
by: ["severity"],
_count: { id: true },
}),
// Oldest log
prisma.securityAuditLog.findFirst({
orderBy: { timestamp: "asc" },
select: { timestamp: true },
}),
// Newest log
prisma.securityAuditLog.findFirst({
orderBy: { timestamp: "desc" },
select: { timestamp: true },
}),
]);
// Calculate logs by age buckets
const now = new Date();
const ageBuckets = [
{ name: "Last 24 hours", days: 1 },
{ name: "Last 7 days", days: 7 },
{ name: "Last 30 days", days: 30 },
{ name: "Last 90 days", days: 90 },
{ name: "Last 365 days", days: 365 },
{ name: "Older than 1 year", days: Number.POSITIVE_INFINITY },
];
const logsByAge: Array<{ age: string; count: number }> = [];
let previousDate = now;
for (const bucket of ageBuckets) {
const bucketDate =
bucket.days === Number.POSITIVE_INFINITY
? new Date(0)
: new Date(now.getTime() - bucket.days * 24 * 60 * 60 * 1000);
const count = await prisma.securityAuditLog.count({
where: {
timestamp: {
gte: bucketDate,
lt: previousDate,
},
},
});
logsByAge.push({
age: bucket.name,
count,
});
previousDate = bucketDate;
}
return {
totalLogs,
logsByEventType: Object.fromEntries(
logsByEventType.map((item) => [item.eventType, item._count.id])
),
logsBySeverity: Object.fromEntries(
logsBySeverity.map((item) => [item.severity, item._count.id])
),
logsByAge,
oldestLog: oldestLog?.timestamp,
newestLog: newestLog?.timestamp,
};
}
async validateRetentionPolicies(): Promise<{
valid: boolean;
errors: string[];
warnings: string[];
}> {
const errors: string[] = [];
const warnings: string[] = [];
for (const policy of this.policies) {
// Validate policy structure
if (!policy.name || policy.name.trim() === "") {
errors.push("Policy must have a non-empty name");
}
if (!policy.maxAgeDays || policy.maxAgeDays <= 0) {
errors.push(
`Policy "${policy.name}": maxAgeDays must be a positive number`
);
}
// Validate filters
if (policy.severityFilter && policy.eventTypeFilter) {
warnings.push(
`Policy "${policy.name}": Has both severity and event type filters, ensure this is intentional`
);
}
if (!policy.severityFilter && !policy.eventTypeFilter) {
warnings.push(
`Policy "${policy.name}": No filters specified, will apply to all logs`
);
}
// Warn about very short retention periods
if (policy.maxAgeDays < 30) {
warnings.push(
`Policy "${policy.name}": Very short retention period (${policy.maxAgeDays} days)`
);
}
// Warn about very long retention periods without archiving
if (policy.maxAgeDays > 1095 && !policy.archiveBeforeDelete) {
warnings.push(
`Policy "${policy.name}": Long retention period without archiving may impact performance`
);
}
}
// Check for overlapping policies that might conflict
const overlaps = this.findPolicyOverlaps();
if (overlaps.length > 0) {
warnings.push(
...overlaps.map(
(overlap) =>
`Potential policy overlap: "${overlap.policy1}" and "${overlap.policy2}"`
)
);
}
return {
valid: errors.length === 0,
errors,
warnings,
};
}
private findPolicyOverlaps(): Array<{ policy1: string; policy2: string }> {
const overlaps: Array<{ policy1: string; policy2: string }> = [];
for (let i = 0; i < this.policies.length; i++) {
for (let j = i + 1; j < this.policies.length; j++) {
const policy1 = this.policies[i];
const policy2 = this.policies[j];
// Check if policies have overlapping filters
const hasOverlappingSeverity = this.arraysOverlap(
policy1.severityFilter || [],
policy2.severityFilter || []
);
const hasOverlappingEventType = this.arraysOverlap(
policy1.eventTypeFilter || [],
policy2.eventTypeFilter || []
);
if (hasOverlappingSeverity || hasOverlappingEventType) {
overlaps.push({ policy1: policy1.name, policy2: policy2.name });
}
}
}
return overlaps;
}
private arraysOverlap(arr1: string[], arr2: string[]): boolean {
if (arr1.length === 0 || arr2.length === 0) return false;
return arr1.some((item) => arr2.includes(item));
}
}
// Utility function for scheduled retention execution
export async function executeScheduledRetention(
isDryRun = false
): Promise<void> {
const manager = new AuditLogRetentionManager(
DEFAULT_RETENTION_POLICIES,
isDryRun
);
console.log(
`Starting scheduled audit log retention (dry run: ${isDryRun})...`
);
try {
// Validate policies first
const validation = await manager.validateRetentionPolicies();
if (!validation.valid) {
throw new Error(
`Invalid retention policies: ${validation.errors.join(", ")}`
);
}
if (validation.warnings.length > 0) {
console.warn("Retention policy warnings:", validation.warnings);
}
// Execute retention
const results = await manager.executeRetentionPolicies();
console.log("Retention execution completed:");
console.log(` Total processed: ${results.totalProcessed}`);
console.log(` Total deleted: ${results.totalDeleted}`);
console.log(` Total archived: ${results.totalArchived}`);
// Log detailed results
for (const policyResult of results.policyResults) {
console.log(` Policy "${policyResult.policyName}":`);
console.log(` Processed: ${policyResult.processed}`);
console.log(` Deleted: ${policyResult.deleted}`);
console.log(` Archived: ${policyResult.archived}`);
if (policyResult.errors.length > 0) {
console.log(` Errors: ${policyResult.errors.join(", ")}`);
}
}
} catch (error) {
console.error("Scheduled retention execution failed:", error);
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "scheduled_retention_failed",
outcome: AuditOutcome.FAILURE,
errorMessage: `Scheduled retention failed: ${error}`,
context: {
metadata: createAuditMetadata({
isDryRun,
error: "scheduled_retention_failure",
}),
},
});
throw error;
}
}
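
A minimal usage sketch of the module above (illustrative; the import path and the wrapper script are assumptions, not part of this commit). A dry run validates the policies and reports what would be deleted or archived without touching any rows:

import {
  AuditLogRetentionManager,
  DEFAULT_RETENTION_POLICIES,
} from "./lib/auditLogRetention";

async function dryRun() {
  // Second argument enables dry-run mode: counts only, no deletes/archives.
  const manager = new AuditLogRetentionManager(DEFAULT_RETENTION_POLICIES, true);

  const validation = await manager.validateRetentionPolicies();
  if (!validation.valid) {
    throw new Error(`Invalid policies: ${validation.errors.join(", ")}`);
  }

  const results = await manager.executeRetentionPolicies();
  console.log(
    `Would delete ${results.totalDeleted} and archive ${results.totalArchived} of ${results.totalProcessed} logs`
  );
}

dryRun().catch(console.error);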

lib/auditLogScheduler.ts (new file, 180 lines)

@@ -0,0 +1,180 @@
import cron from "node-cron";
import { executeScheduledRetention } from "./auditLogRetention";
import {
AuditOutcome,
createAuditMetadata,
SecurityEventType,
securityAuditLogger,
} from "./securityAuditLogger";
export class AuditLogScheduler {
private retentionTask: cron.ScheduledTask | null = null;
private isRunning = false;
constructor() {
this.isRunning = false;
}
start(): void {
if (this.isRunning) {
console.log("Audit log scheduler is already running");
return;
}
const retentionSchedule =
process.env.AUDIT_LOG_RETENTION_SCHEDULE || "0 2 * * 0"; // Default: 2 AM every Sunday
const isDryRun = process.env.AUDIT_LOG_RETENTION_DRY_RUN === "true";
console.log(
`Starting audit log scheduler with schedule: ${retentionSchedule}`
);
console.log(`Dry run mode: ${isDryRun}`);
// Schedule retention policy execution
this.retentionTask = cron.schedule(
retentionSchedule,
async () => {
console.log("Executing scheduled audit log retention...");
try {
await executeScheduledRetention(isDryRun);
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "scheduled_audit_retention_success",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
schedule: retentionSchedule,
isDryRun,
executionTime: new Date().toISOString(),
}),
},
});
} catch (error) {
console.error("Scheduled audit log retention failed:", error);
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "scheduled_audit_retention_failure",
outcome: AuditOutcome.FAILURE,
errorMessage: `Scheduled audit retention failed: ${error}`,
context: {
metadata: createAuditMetadata({
schedule: retentionSchedule,
isDryRun,
executionTime: new Date().toISOString(),
error: "retention_execution_failed",
}),
},
});
}
},
{
scheduled: false, // Don't start immediately
timezone: "UTC", // Use UTC to avoid timezone issues
}
);
this.retentionTask.start();
this.isRunning = true;
// Log scheduler startup
securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "audit_log_scheduler_started",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
retentionSchedule,
isDryRun,
timezone: "UTC",
}),
},
});
console.log("Audit log scheduler started successfully");
}
stop(): void {
if (!this.isRunning) {
console.log("Audit log scheduler is not running");
return;
}
if (this.retentionTask) {
this.retentionTask.stop();
this.retentionTask = null;
}
this.isRunning = false;
// Log scheduler shutdown
securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "audit_log_scheduler_stopped",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
shutdownTime: new Date().toISOString(),
}),
},
});
console.log("Audit log scheduler stopped");
}
getStatus(): {
isRunning: boolean;
nextExecution?: Date;
schedule?: string;
} {
return {
isRunning: this.isRunning,
// node-cron does not expose the next scheduled run time, so nextExecution
// is intentionally omitted here.
schedule: process.env.AUDIT_LOG_RETENTION_SCHEDULE || "0 2 * * 0",
};
}
async executeNow(isDryRun = false): Promise<void> {
console.log(
`Manually executing audit log retention (dry run: ${isDryRun})...`
);
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "manual_audit_retention_triggered",
outcome: AuditOutcome.SUCCESS,
context: {
metadata: createAuditMetadata({
isDryRun,
triggerTime: new Date().toISOString(),
triggerType: "manual",
}),
},
});
try {
await executeScheduledRetention(isDryRun);
} catch (error) {
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "manual_audit_retention_failed",
outcome: AuditOutcome.FAILURE,
errorMessage: `Manual audit retention failed: ${error}`,
context: {
metadata: createAuditMetadata({
isDryRun,
triggerTime: new Date().toISOString(),
triggerType: "manual",
error: "retention_execution_failed",
}),
},
});
throw error;
}
}
}
// Export singleton instance
export const auditLogScheduler = new AuditLogScheduler();
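
A short sketch of how the singleton might be wired up at application startup (the bootstrap function and import path are assumptions for illustration):

import { auditLogScheduler } from "./lib/auditLogScheduler";

async function bootstrap() {
  // Starts the cron task; schedule and dry-run mode come from
  // AUDIT_LOG_RETENTION_SCHEDULE and AUDIT_LOG_RETENTION_DRY_RUN.
  auditLogScheduler.start();
  console.log(auditLogScheduler.getStatus());

  // Kick off an immediate dry run without waiting for the schedule.
  await auditLogScheduler.executeNow(true);
}

bootstrap().catch(console.error);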


@@ -2,6 +2,13 @@ import bcrypt from "bcryptjs";
import type { NextAuthOptions } from "next-auth";
import CredentialsProvider from "next-auth/providers/credentials";
import { prisma } from "./prisma";
import {
AuditOutcome,
AuditSeverity,
createAuditMetadata,
SecurityEventType,
} from "./securityAuditLogger";
import { enhancedSecurityLog } from "./securityMonitoring";
// Define the shape of the JWT token
declare module "next-auth/jwt" {
@@ -47,8 +54,25 @@ export const authOptions: NextAuthOptions = {
email: { label: "Email", type: "email" },
password: { label: "Password", type: "password" },
},
- async authorize(credentials) {
+ async authorize(credentials, _req) {
if (!credentials?.email || !credentials?.password) {
await enhancedSecurityLog(
SecurityEventType.AUTHENTICATION,
"login_attempt",
AuditOutcome.FAILURE,
{
metadata: createAuditMetadata({
error: "missing_credentials",
email: credentials?.email ? "[REDACTED]" : "missing",
}),
},
AuditSeverity.MEDIUM,
"Missing email or password",
{
attemptType: "missing_credentials",
endpoint: "/api/auth/signin",
}
);
return null;
}
@@ -58,6 +82,24 @@
});
if (!user || !user.password) {
await enhancedSecurityLog(
SecurityEventType.AUTHENTICATION,
"login_attempt",
AuditOutcome.FAILURE,
{
metadata: createAuditMetadata({
error: "user_not_found",
email: "[REDACTED]",
}),
},
AuditSeverity.MEDIUM,
"User not found or no password set",
{
attemptType: "user_not_found",
email: credentials.email,
endpoint: "/api/auth/signin",
}
);
return null;
}
@@ -67,14 +109,77 @@
);
if (!isPasswordValid) {
await enhancedSecurityLog(
SecurityEventType.AUTHENTICATION,
"login_attempt",
AuditOutcome.FAILURE,
{
userId: user.id,
companyId: user.companyId,
metadata: createAuditMetadata({
error: "invalid_password",
email: "[REDACTED]",
}),
},
AuditSeverity.HIGH,
"Invalid password",
{
attemptType: "invalid_password",
email: credentials.email,
endpoint: "/api/auth/signin",
userId: user.id,
}
);
return null;
}
// Check if company is active
if (user.company.status !== "ACTIVE") {
await enhancedSecurityLog(
SecurityEventType.AUTHENTICATION,
"login_attempt",
AuditOutcome.BLOCKED,
{
userId: user.id,
companyId: user.companyId,
metadata: createAuditMetadata({
error: "company_inactive",
companyStatus: user.company.status,
}),
},
AuditSeverity.HIGH,
`Company status is ${user.company.status}`,
{
attemptType: "company_inactive",
companyStatus: user.company.status,
endpoint: "/api/auth/signin",
}
);
return null;
}
// Log successful authentication
await enhancedSecurityLog(
SecurityEventType.AUTHENTICATION,
"login_success",
AuditOutcome.SUCCESS,
{
userId: user.id,
companyId: user.companyId,
metadata: createAuditMetadata({
userRole: user.role,
companyName: user.company.name,
}),
},
AuditSeverity.INFO,
undefined,
{
userRole: user.role,
companyName: user.company.name,
endpoint: "/api/auth/signin",
}
);
return {
id: user.id,
email: user.email,

lib/batchLogger.ts (new file, 646 lines)

@@ -0,0 +1,646 @@
/**
* Comprehensive Logging System for OpenAI Batch Processing Operations
*
* This module provides structured logging with different log levels,
* performance metrics tracking, and integration with security audit logging.
*/
import type { AIBatchRequestStatus, AIRequestStatus } from "@prisma/client";
import {
AuditOutcome,
AuditSeverity,
SecurityEventType,
securityAuditLogger,
} from "./securityAuditLogger";
export enum BatchLogLevel {
DEBUG = "DEBUG",
INFO = "INFO",
WARN = "WARN",
ERROR = "ERROR",
CRITICAL = "CRITICAL",
}
export enum BatchOperation {
BATCH_CREATION = "BATCH_CREATION",
BATCH_STATUS_CHECK = "BATCH_STATUS_CHECK",
BATCH_RESULT_PROCESSING = "BATCH_RESULT_PROCESSING",
FILE_UPLOAD = "FILE_UPLOAD",
FILE_DOWNLOAD = "FILE_DOWNLOAD",
CIRCUIT_BREAKER_ACTION = "CIRCUIT_BREAKER_ACTION",
RETRY_OPERATION = "RETRY_OPERATION",
SCHEDULER_ACTION = "SCHEDULER_ACTION",
INDIVIDUAL_REQUEST_RETRY = "INDIVIDUAL_REQUEST_RETRY",
COST_TRACKING = "COST_TRACKING",
}
export interface BatchLogContext {
operation: BatchOperation;
batchId?: string;
requestId?: string;
companyId?: string;
openaiBatchId?: string;
fileId?: string;
requestCount?: number;
retryAttempt?: number;
duration?: number;
statusBefore?: AIBatchRequestStatus | AIRequestStatus;
statusAfter?: AIBatchRequestStatus | AIRequestStatus;
errorCode?: string;
circuitBreakerState?: "OPEN" | "CLOSED" | "HALF_OPEN";
metadata?: Record<string, any>;
}
export interface BatchMetrics {
operationStartTime: number;
requestCount: number;
successCount: number;
failureCount: number;
retryCount: number;
totalCost: number;
averageLatency: number;
circuitBreakerTrips: number;
performanceStats: {
p50: number;
p95: number;
p99: number;
};
}
class BatchLoggerService {
private metrics: Map<string, BatchMetrics> = new Map();
private operationTimes: Map<string, number> = new Map();
private performanceBuffer: Map<BatchOperation, number[]> = new Map();
private readonly LOG_COLORS = {
[BatchLogLevel.DEBUG]: "\x1b[36m", // Cyan
[BatchLogLevel.INFO]: "\x1b[32m", // Green
[BatchLogLevel.WARN]: "\x1b[33m", // Yellow
[BatchLogLevel.ERROR]: "\x1b[31m", // Red
[BatchLogLevel.CRITICAL]: "\x1b[35m", // Magenta
};
private readonly RESET_COLOR = "\x1b[0m";
/**
* Log a batch processing event with structured data
*/
async log(
level: BatchLogLevel,
message: string,
context: BatchLogContext,
error?: Error
): Promise<void> {
const timestamp = new Date().toISOString();
const operationId = context.batchId || context.requestId || "unknown";
// Create structured log entry
const logEntry = {
timestamp,
level,
operation: context.operation,
message,
context: this.sanitizeContext(context),
error: error ? this.formatError(error) : undefined,
operationId,
};
// Console logging with colors (development)
if (process.env.NODE_ENV !== "production") {
this.logToConsole(logEntry);
}
// Structured logging (production)
this.logToStructured(logEntry);
// Security audit logging for important events
await this.logToSecurityAudit(level, message, context, error);
// Update metrics
this.updateMetrics(context, error);
// Performance tracking
this.trackPerformance(context);
}
/**
* Start timing an operation
*/
startOperation(operationId: string): void {
this.operationTimes.set(operationId, Date.now());
}
/**
* End timing an operation and return duration
*/
endOperation(operationId: string): number {
const startTime = this.operationTimes.get(operationId);
if (!startTime) return 0;
const duration = Date.now() - startTime;
this.operationTimes.delete(operationId);
return duration;
}
/**
* Log batch creation events
*/
async logBatchCreation(
companyId: string,
requestCount: number,
batchId?: string,
openaiBatchId?: string,
error?: Error
): Promise<void> {
const level = error ? BatchLogLevel.ERROR : BatchLogLevel.INFO;
const message = error
? `Failed to create batch for company ${companyId} with ${requestCount} requests`
: `Successfully created batch for company ${companyId} with ${requestCount} requests`;
await this.log(
level,
message,
{
operation: BatchOperation.BATCH_CREATION,
companyId,
batchId,
openaiBatchId,
requestCount,
},
error
);
}
/**
* Log batch status check events
*/
async logStatusCheck(
batchId: string,
openaiBatchId: string,
statusBefore: AIBatchRequestStatus,
statusAfter: AIBatchRequestStatus,
duration: number,
error?: Error
): Promise<void> {
const level = error ? BatchLogLevel.ERROR : BatchLogLevel.DEBUG;
const statusChanged = statusBefore !== statusAfter;
const message = error
? `Failed to check status for batch ${batchId}`
: statusChanged
? `Batch ${batchId} status changed from ${statusBefore} to ${statusAfter}`
: `Batch ${batchId} status remains ${statusAfter}`;
await this.log(
level,
message,
{
operation: BatchOperation.BATCH_STATUS_CHECK,
batchId,
openaiBatchId,
statusBefore,
statusAfter,
duration,
},
error
);
}
/**
* Log batch result processing events
*/
async logResultProcessing(
batchId: string,
openaiBatchId: string,
successCount: number,
failureCount: number,
duration: number,
error?: Error
): Promise<void> {
const level = error ? BatchLogLevel.ERROR : BatchLogLevel.INFO;
const totalProcessed = successCount + failureCount;
const message = error
? `Failed to process results for batch ${batchId}`
: `Processed ${totalProcessed} results for batch ${batchId} (${successCount} success, ${failureCount} failed)`;
await this.log(
level,
message,
{
operation: BatchOperation.BATCH_RESULT_PROCESSING,
batchId,
openaiBatchId,
duration,
metadata: { successCount, failureCount, totalProcessed },
},
error
);
}
/**
* Log circuit breaker events
*/
async logCircuitBreaker(
operation: string,
state: "OPEN" | "CLOSED" | "HALF_OPEN",
failures: number,
threshold: number
): Promise<void> {
const level = state === "OPEN" ? BatchLogLevel.WARN : BatchLogLevel.INFO;
const message = `Circuit breaker ${state.toLowerCase()} for ${operation} (${failures}/${threshold} failures)`;
await this.log(level, message, {
operation: BatchOperation.CIRCUIT_BREAKER_ACTION,
circuitBreakerState: state,
metadata: { operation, failures, threshold },
});
}
/**
* Log retry attempts
*/
async logRetry(
operation: BatchOperation,
operationName: string,
attempt: number,
maxRetries: number,
delay: number,
error: Error,
batchId?: string,
requestId?: string
): Promise<void> {
const level =
attempt === maxRetries ? BatchLogLevel.ERROR : BatchLogLevel.WARN;
const message =
attempt === maxRetries
? `Final retry failed for ${operationName} (${attempt}/${maxRetries})`
: `Retry attempt ${attempt}/${maxRetries} for ${operationName} (next retry in ${delay}ms)`;
await this.log(
level,
message,
{
operation,
batchId,
requestId,
retryAttempt: attempt,
metadata: { operationName, maxRetries, delay },
},
error
);
}
/**
* Log scheduler events
*/
async logScheduler(
action: string,
duration: number,
successCount: number,
errorCount: number,
error?: Error
): Promise<void> {
const level = error
? BatchLogLevel.ERROR
: errorCount > 0
? BatchLogLevel.WARN
: BatchLogLevel.INFO;
const message = `Scheduler ${action} completed in ${duration}ms (${successCount} success, ${errorCount} errors)`;
await this.log(
level,
message,
{
operation: BatchOperation.SCHEDULER_ACTION,
duration,
metadata: { action, successCount, errorCount },
},
error
);
}
/**
* Log cost tracking information
*/
async logCostTracking(
companyId: string,
requestCount: number,
totalCost: number,
tokenUsage: { prompt: number; completion: number; total: number },
batchId?: string
): Promise<void> {
const costPerRequest = requestCount > 0 ? totalCost / requestCount : 0;
const message = `Cost tracking for ${requestCount} requests: €${totalCost.toFixed(4)} (€${costPerRequest.toFixed(4)} per request)`;
await this.log(BatchLogLevel.INFO, message, {
operation: BatchOperation.COST_TRACKING,
companyId,
batchId,
requestCount,
metadata: { totalCost, costPerRequest, tokenUsage },
});
}
/**
* Log performance metrics
*/
async logPerformanceMetrics(operation: BatchOperation): Promise<void> {
const timings = this.performanceBuffer.get(operation) || [];
if (timings.length === 0) return;
const sorted = [...timings].sort((a, b) => a - b);
const stats = {
count: sorted.length,
min: sorted[0],
max: sorted[sorted.length - 1],
avg: sorted.reduce((a, b) => a + b, 0) / sorted.length,
p50: sorted[Math.floor(sorted.length * 0.5)],
p95: sorted[Math.floor(sorted.length * 0.95)],
p99: sorted[Math.floor(sorted.length * 0.99)],
};
const message = `Performance metrics for ${operation}: avg=${stats.avg.toFixed(2)}ms, p95=${stats.p95}ms, p99=${stats.p99}ms`;
await this.log(BatchLogLevel.INFO, message, {
operation,
metadata: { performanceStats: stats },
});
// Clear buffer after reporting
this.performanceBuffer.delete(operation);
}
/**
* Get comprehensive metrics for monitoring
*/
getMetrics(companyId?: string): BatchMetrics | Record<string, BatchMetrics> {
if (companyId) {
return this.metrics.get(companyId) || this.createEmptyMetrics();
}
const allMetrics: Record<string, BatchMetrics> = {};
for (const [key, metrics] of this.metrics) {
allMetrics[key] = metrics;
}
return allMetrics;
}
/**
* Export logs for analysis (structured JSON format)
*/
exportLogs(timeRange: { start: Date; end: Date }): string {
// In production, this would read from persistent log storage
// For now, return current metrics as example
const exportData = {
exportTime: new Date().toISOString(),
timeRange,
metrics: Object.fromEntries(this.metrics),
performanceBuffers: Object.fromEntries(this.performanceBuffer),
summary: {
totalOperations: this.operationTimes.size,
activeOperations: this.operationTimes.size,
metricsTracked: this.metrics.size,
},
};
return JSON.stringify(exportData, null, 2);
}
/**
* Clear old metrics to prevent memory leaks
*/
cleanupMetrics(olderThanHours = 24): void {
const cutoff = Date.now() - olderThanHours * 60 * 60 * 1000;
for (const [key, metrics] of this.metrics) {
if (metrics.operationStartTime < cutoff) {
this.metrics.delete(key);
}
}
// Clear old operation times
for (const [operationId, startTime] of this.operationTimes) {
if (startTime < cutoff) {
this.operationTimes.delete(operationId);
}
}
console.log(
`Cleaned up batch processing metrics older than ${olderThanHours} hours`
);
}
private logToConsole(logEntry: any): void {
const color = this.LOG_COLORS[logEntry.level as BatchLogLevel] || "";
const prefix = `${color}[BATCH-${logEntry.level}]${this.RESET_COLOR}`;
console.log(`${prefix} ${logEntry.timestamp} ${logEntry.message}`);
if (logEntry.context && Object.keys(logEntry.context).length > 0) {
console.log(" Context:", this.formatContextForConsole(logEntry.context));
}
if (logEntry.error) {
console.log(" Error:", logEntry.error);
}
}
private logToStructured(logEntry: any): void {
// In production, this would write to structured logging service
// (e.g., Winston, Pino, or cloud logging service)
if (process.env.NODE_ENV === "production") {
// JSON structured logging for production
console.log(JSON.stringify(logEntry));
}
}
private async logToSecurityAudit(
level: BatchLogLevel,
_message: string,
context: BatchLogContext,
error?: Error
): Promise<void> {
// Log to security audit system for important events
if (level === BatchLogLevel.ERROR || level === BatchLogLevel.CRITICAL) {
await securityAuditLogger.log({
eventType: SecurityEventType.API_SECURITY,
action: `batch_processing_${context.operation.toLowerCase()}`,
outcome: error ? AuditOutcome.FAILURE : AuditOutcome.SUCCESS,
severity:
level === BatchLogLevel.CRITICAL
? AuditSeverity.CRITICAL
: AuditSeverity.HIGH,
errorMessage: error?.message,
context: {
companyId: context.companyId,
metadata: {
operation: context.operation,
batchId: context.batchId,
requestId: context.requestId,
retryAttempt: context.retryAttempt,
...context.metadata,
},
},
});
}
}
private updateMetrics(context: BatchLogContext, error?: Error): void {
const key = context.companyId || "global";
let metrics = this.metrics.get(key);
if (!metrics) {
metrics = this.createEmptyMetrics();
this.metrics.set(key, metrics);
}
if (error) {
metrics.failureCount++;
} else {
metrics.successCount++;
}
if (context.retryAttempt) {
metrics.retryCount++;
}
if (context.operation === BatchOperation.CIRCUIT_BREAKER_ACTION) {
metrics.circuitBreakerTrips++;
}
if (context.duration) {
const operationCount = metrics.successCount + metrics.failureCount;
metrics.averageLatency =
(metrics.averageLatency * (operationCount - 1) + context.duration) /
operationCount;
}
// Update request count if provided
if (context.requestCount) {
metrics.requestCount += context.requestCount;
}
}
private trackPerformance(context: BatchLogContext): void {
if (context.duration && context.operation) {
const timings = this.performanceBuffer.get(context.operation) || [];
timings.push(context.duration);
// Keep only last 100 measurements to prevent memory issues
if (timings.length > 100) {
timings.splice(0, timings.length - 100);
}
this.performanceBuffer.set(context.operation, timings);
}
}
private createEmptyMetrics(): BatchMetrics {
return {
operationStartTime: Date.now(),
requestCount: 0,
successCount: 0,
failureCount: 0,
retryCount: 0,
totalCost: 0,
averageLatency: 0,
circuitBreakerTrips: 0,
performanceStats: { p50: 0, p95: 0, p99: 0 },
};
}
private sanitizeContext(context: BatchLogContext): any {
// Remove sensitive information before logging; metadata is copied as well
// so redaction does not mutate the caller's object through the shallow spread.
const sanitized = {
...context,
metadata: context.metadata ? { ...context.metadata } : undefined,
};
delete sanitized.metadata?.apiKey;
delete sanitized.metadata?.credentials;
return sanitized;
}
private formatError(error: Error): any {
return {
name: error.name,
message: error.message,
stack: process.env.NODE_ENV === "development" ? error.stack : undefined,
cause: error.cause ? String(error.cause) : undefined,
};
}
private formatContextForConsole(context: any): string {
const important = {
operation: context.operation,
batchId: context.batchId,
requestId: context.requestId,
companyId: context.companyId,
requestCount: context.requestCount,
duration: context.duration ? `${context.duration}ms` : undefined,
retryAttempt: context.retryAttempt,
circuitBreakerState: context.circuitBreakerState,
};
// Filter out undefined values
const filtered = Object.fromEntries(
Object.entries(important).filter(([_, value]) => value !== undefined)
);
return JSON.stringify(filtered, null, 2);
}
}
// Singleton instance for global use
export const batchLogger = new BatchLoggerService();
// Start cleanup interval
setInterval(
() => {
batchLogger.cleanupMetrics();
},
60 * 60 * 1000
); // Every hour
// Helper functions for common logging patterns
export const logBatchOperation = async (
operation: BatchOperation,
operationId: string,
fn: () => Promise<any>,
context: Partial<BatchLogContext> = {}
): Promise<any> => {
batchLogger.startOperation(operationId);
try {
const result = await fn();
const duration = batchLogger.endOperation(operationId);
await batchLogger.log(
BatchLogLevel.INFO,
`${operation} completed successfully`,
{
operation,
duration,
...context,
}
);
return result;
} catch (error) {
const duration = batchLogger.endOperation(operationId);
await batchLogger.log(
BatchLogLevel.ERROR,
`${operation} failed`,
{
operation,
duration,
...context,
},
error as Error
);
throw error;
}
};
export const logBatchMetrics = async (
operation: BatchOperation
): Promise<void> => {
await batchLogger.logPerformanceMetrics(operation);
};
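
A usage sketch for the wrapper above; checkBatchStatus is a hypothetical work function standing in for a real OpenAI status call, and the import path is assumed:

import { BatchOperation, batchLogger, logBatchOperation } from "./lib/batchLogger";

declare function checkBatchStatus(batchId: string): Promise<string>;

async function example() {
  // The wrapper times the call, logs success or failure with the duration,
  // and feeds the per-operation metrics and percentile buffers.
  const status = await logBatchOperation(
    BatchOperation.BATCH_STATUS_CHECK,
    "batch_123", // operation id used for timing
    () => checkBatchStatus("batch_123"),
    { batchId: "batch_123", companyId: "company_1" }
  );
  console.log(status);

  // Periodically flush aggregated timings (avg/p95/p99) to the log.
  await batchLogger.logPerformanceMetrics(BatchOperation.BATCH_STATUS_CHECK);
}

example().catch(console.error);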

File diff suppressed because it is too large.


@@ -0,0 +1,340 @@
/**
* Batch Processor Integration Layer
*
* This module provides a unified interface that can switch between
* the original and optimized batch processing implementations based
* on environment configuration or runtime decisions.
*/
import { BatchLogLevel, BatchOperation, batchLogger } from "./batchLogger";
// Import both implementations
import * as OriginalProcessor from "./batchProcessor";
import * as OptimizedProcessor from "./batchProcessorOptimized";
import * as OriginalScheduler from "./batchScheduler";
import * as OptimizedScheduler from "./batchSchedulerOptimized";
/**
* Configuration for batch processing optimization
*/
const OPTIMIZATION_CONFIG = {
// Enable optimized queries (can be controlled via environment)
ENABLE_QUERY_OPTIMIZATION: process.env.ENABLE_BATCH_OPTIMIZATION !== "false",
// Enable batch operations across companies
ENABLE_BATCH_OPERATIONS: process.env.ENABLE_BATCH_OPERATIONS !== "false",
// Enable parallel processing
ENABLE_PARALLEL_PROCESSING:
process.env.ENABLE_PARALLEL_PROCESSING !== "false",
// Fallback to original on errors
FALLBACK_ON_ERRORS: process.env.FALLBACK_ON_ERRORS !== "false",
} as const;
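// Illustrative (not part of this commit): the flags above are read once at
// module load, so they must be set before the process starts, e.g.
//
//   ENABLE_BATCH_OPTIMIZATION=false FALLBACK_ON_ERRORS=false node server.js
//
// where server.js stands in for the real entry point.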
/**
* Performance tracking for optimization decisions
*/
class PerformanceTracker {
private metrics = {
optimized: { totalTime: 0, operationCount: 0, errorCount: 0 },
original: { totalTime: 0, operationCount: 0, errorCount: 0 },
};
recordOperation(
type: "optimized" | "original",
duration: number,
success: boolean
): void {
this.metrics[type].totalTime += duration;
this.metrics[type].operationCount++;
if (!success) {
this.metrics[type].errorCount++;
}
}
getAverageTime(type: "optimized" | "original"): number {
const metric = this.metrics[type];
return metric.operationCount > 0
? metric.totalTime / metric.operationCount
: 0;
}
getSuccessRate(type: "optimized" | "original"): number {
const metric = this.metrics[type];
if (metric.operationCount === 0) return 1;
return (metric.operationCount - metric.errorCount) / metric.operationCount;
}
shouldUseOptimized(): boolean {
if (!OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION) return false;
// If we don't have enough data, use optimized
if (this.metrics.optimized.operationCount < 5) return true;
// Use optimized if it's faster and has good success rate
const optimizedAvg = this.getAverageTime("optimized");
const originalAvg = this.getAverageTime("original");
const optimizedSuccess = this.getSuccessRate("optimized");
return optimizedAvg < originalAvg && optimizedSuccess > 0.9;
}
getStats() {
return {
optimized: {
averageTime: this.getAverageTime("optimized"),
successRate: this.getSuccessRate("optimized"),
...this.metrics.optimized,
},
original: {
averageTime: this.getAverageTime("original"),
successRate: this.getSuccessRate("original"),
...this.metrics.original,
},
};
}
// Allow callers (e.g. tests) to reset tracking without touching private state.
reset(): void {
this.metrics = {
optimized: { totalTime: 0, operationCount: 0, errorCount: 0 },
original: { totalTime: 0, operationCount: 0, errorCount: 0 },
};
}
}
const performanceTracker = new PerformanceTracker();
/**
* Wrapper function to execute with performance tracking
*/
async function executeWithTracking<T>(
optimizedFn: () => Promise<T>,
originalFn: () => Promise<T>,
operationName: string
): Promise<T> {
const useOptimized = performanceTracker.shouldUseOptimized();
const startTime = Date.now();
try {
let result: T;
if (useOptimized) {
await batchLogger.log(
BatchLogLevel.DEBUG,
`Using optimized implementation for ${operationName}`,
{
operation: BatchOperation.SCHEDULER_ACTION,
metadata: { operationName },
}
);
result = await optimizedFn();
performanceTracker.recordOperation(
"optimized",
Date.now() - startTime,
true
);
} else {
await batchLogger.log(
BatchLogLevel.DEBUG,
`Using original implementation for ${operationName}`,
{
operation: BatchOperation.SCHEDULER_ACTION,
metadata: { operationName },
}
);
result = await originalFn();
performanceTracker.recordOperation(
"original",
Date.now() - startTime,
true
);
}
return result;
} catch (error) {
const duration = Date.now() - startTime;
if (useOptimized) {
performanceTracker.recordOperation("optimized", duration, false);
if (OPTIMIZATION_CONFIG.FALLBACK_ON_ERRORS) {
await batchLogger.log(
BatchLogLevel.WARN,
`Optimized ${operationName} failed, falling back to original implementation`,
{
operation: BatchOperation.SCHEDULER_ACTION,
metadata: { operationName },
},
error as Error
);
try {
const result = await originalFn();
performanceTracker.recordOperation(
"original",
Date.now() - startTime,
true
);
return result;
} catch (fallbackError) {
performanceTracker.recordOperation(
"original",
Date.now() - startTime,
false
);
throw fallbackError;
}
}
} else {
performanceTracker.recordOperation("original", duration, false);
}
throw error;
}
}
/**
* Unified interface for batch processing operations
*/
export class IntegratedBatchProcessor {
/**
* Get pending batch requests with automatic optimization
*/
static async getPendingBatchRequests(companyId: string, limit?: number) {
return executeWithTracking(
() =>
OptimizedProcessor.getPendingBatchRequestsOptimized(companyId, limit),
() => OriginalProcessor.getPendingBatchRequests(companyId, limit),
"getPendingBatchRequests"
);
}
/**
* Get batch processing statistics with optimization
*/
static async getBatchProcessingStats(companyId?: string) {
return executeWithTracking(
() => OptimizedProcessor.getBatchProcessingStatsOptimized(companyId),
() => OriginalProcessor.getBatchProcessingStats(companyId || ""),
"getBatchProcessingStats"
);
}
/**
* Check if we should create a batch for a company
*/
static async shouldCreateBatch(
companyId: string,
pendingCount: number
): Promise<boolean> {
if (performanceTracker.shouldUseOptimized()) {
// Always create if we have enough requests
if (pendingCount >= 10) {
// MIN_BATCH_SIZE
return true;
}
// Check if oldest pending request is old enough (optimized query)
const oldestPending =
await OptimizedProcessor.getOldestPendingRequestOptimized(companyId);
if (!oldestPending) {
return false;
}
const waitTimeMs = Date.now() - oldestPending.requestedAt.getTime();
const maxWaitTimeMs = 30 * 60 * 1000; // MAX_WAIT_TIME_MINUTES
return waitTimeMs >= maxWaitTimeMs;
}
// Use original implementation logic
return false; // Simplified fallback
}
/**
* Start the appropriate scheduler based on configuration
*/
static startScheduler(): void {
if (OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION) {
OptimizedScheduler.startOptimizedBatchScheduler();
} else {
OriginalScheduler.startBatchScheduler();
}
}
/**
* Stop the appropriate scheduler
*/
static stopScheduler(): void {
if (OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION) {
OptimizedScheduler.stopOptimizedBatchScheduler();
} else {
OriginalScheduler.stopBatchScheduler();
}
}
/**
* Get scheduler status with optimization info
*/
static getSchedulerStatus() {
const baseStatus = OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION
? OptimizedScheduler.getOptimizedBatchSchedulerStatus()
: OriginalScheduler.getBatchSchedulerStatus();
return {
...baseStatus,
optimization: {
enabled: OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION,
config: OPTIMIZATION_CONFIG,
performance: performanceTracker.getStats(),
},
};
}
/**
* Force invalidate caches (useful for testing or manual intervention)
*/
static invalidateCaches(): void {
if (OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION) {
OptimizedProcessor.invalidateCompanyCache();
}
}
/**
* Get cache statistics
*/
static getCacheStats() {
if (OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION) {
return OptimizedProcessor.getCompanyCacheStats();
}
return null;
}
/**
* Reset performance tracking (useful for testing)
*/
static resetPerformanceTracking(): void {
performanceTracker.reset();
}
}
/**
* Export unified functions that can be used as drop-in replacements
*/
export const getPendingBatchRequests =
IntegratedBatchProcessor.getPendingBatchRequests;
export const getBatchProcessingStats =
IntegratedBatchProcessor.getBatchProcessingStats;
export const startBatchScheduler = IntegratedBatchProcessor.startScheduler;
export const stopBatchScheduler = IntegratedBatchProcessor.stopScheduler;
export const getBatchSchedulerStatus =
IntegratedBatchProcessor.getSchedulerStatus;
/**
* Log optimization configuration on module load
*/
(async () => {
await batchLogger.log(
BatchLogLevel.INFO,
"Batch processor integration initialized",
{
operation: BatchOperation.SCHEDULER_ACTION,
metadata: {
optimizationEnabled: OPTIMIZATION_CONFIG.ENABLE_QUERY_OPTIMIZATION,
config: OPTIMIZATION_CONFIG,
},
}
);
})();
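
A sketch of how the unified interface might be consumed (the import path is an assumption; this file's name is not shown in the diff):

import {
  IntegratedBatchProcessor,
  getBatchSchedulerStatus,
  startBatchScheduler,
} from "./lib/batchProcessorIntegration";

// Starts either the optimized or the original scheduler, depending on
// ENABLE_BATCH_OPTIMIZATION.
startBatchScheduler();

// Status includes the rolling optimized-vs-original performance stats
// that drive shouldUseOptimized().
const status = getBatchSchedulerStatus();
console.log(status.optimization.performance);

// After activating or deactivating a company, drop the cached list.
IntegratedBatchProcessor.invalidateCaches();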


@@ -0,0 +1,500 @@
/**
* Optimized Database Queries for OpenAI Batch Processing
*
* This module provides optimized versions of batch processing queries
* with improved performance through:
* - Reduced data fetching with selective includes
* - Company caching to eliminate redundant lookups
* - Batch operations to reduce N+1 queries
* - Query result pooling and reuse
*/
import {
AIBatchRequestStatus,
type AIProcessingRequest,
AIRequestStatus,
} from "@prisma/client";
import { BatchLogLevel, BatchOperation, batchLogger } from "./batchLogger";
import { prisma } from "./prisma";
/**
* Cache for active companies to reduce database lookups
*/
interface CachedCompany {
id: string;
name: string;
cachedAt: number;
}
class CompanyCache {
private cache = new Map<string, CachedCompany>();
private allActiveCompanies: CachedCompany[] | null = null;
private allActiveCompaniesCachedAt = 0;
private readonly CACHE_TTL = 5 * 60 * 1000; // 5 minutes
async getActiveCompanies(): Promise<CachedCompany[]> {
const now = Date.now();
if (
this.allActiveCompanies &&
now - this.allActiveCompaniesCachedAt < this.CACHE_TTL
) {
return this.allActiveCompanies;
}
const companies = await prisma.company.findMany({
where: { status: "ACTIVE" },
select: { id: true, name: true },
});
this.allActiveCompanies = companies.map((company) => ({
...company,
cachedAt: now,
}));
this.allActiveCompaniesCachedAt = now;
await batchLogger.log(
BatchLogLevel.DEBUG,
`Refreshed company cache with ${companies.length} active companies`,
{
operation: BatchOperation.SCHEDULER_ACTION,
requestCount: companies.length,
}
);
return this.allActiveCompanies;
}
invalidate(): void {
this.cache.clear();
this.allActiveCompanies = null;
this.allActiveCompaniesCachedAt = 0;
}
// Expose cache state without leaking private fields (see getCompanyCacheStats below).
stats(): { isActive: boolean; cachedAt: Date; cacheSize: number } {
return {
isActive: this.allActiveCompanies !== null,
cachedAt: new Date(this.allActiveCompaniesCachedAt),
cacheSize: this.allActiveCompanies?.length ?? 0,
};
}
}
const companyCache = new CompanyCache();
/**
* Optimized version of getPendingBatchRequests with minimal data fetching
*/
export async function getPendingBatchRequestsOptimized(
companyId: string,
limit = 1000
): Promise<AIProcessingRequest[]> {
const startTime = Date.now();
// Use a more efficient query that only fetches what we need
const requests = await prisma.aIProcessingRequest.findMany({
where: {
session: { companyId },
processingStatus: AIRequestStatus.PENDING_BATCHING,
batchId: null,
},
// Only include essential session data, not all messages
include: {
session: {
select: {
id: true,
companyId: true,
// Only include message count, not full messages
_count: {
select: { messages: true },
},
},
},
},
take: limit,
orderBy: {
requestedAt: "asc",
},
});
const duration = Date.now() - startTime;
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved ${requests.length} pending batch requests for company ${companyId} in ${duration}ms`,
{
operation: BatchOperation.BATCH_CREATION,
companyId,
requestCount: requests.length,
duration,
}
);
return requests as any; // Type assertion since we're only including essential data
}
/**
* Batch operation to get pending requests for multiple companies
*/
export async function getPendingBatchRequestsForAllCompanies(): Promise<
Map<string, AIProcessingRequest[]>
> {
const startTime = Date.now();
const companies = await companyCache.getActiveCompanies();
if (companies.length === 0) {
return new Map();
}
// Single query to get all pending requests for all companies
const allRequests = await prisma.aIProcessingRequest.findMany({
where: {
session: {
companyId: { in: companies.map((c) => c.id) },
},
processingStatus: AIRequestStatus.PENDING_BATCHING,
batchId: null,
},
include: {
session: {
select: {
id: true,
companyId: true,
_count: { select: { messages: true } },
},
},
},
orderBy: { requestedAt: "asc" },
});
// Group requests by company
const requestsByCompany = new Map<string, AIProcessingRequest[]>();
for (const request of allRequests) {
const companyId = request.session?.companyId;
if (!companyId) continue;
if (!requestsByCompany.has(companyId)) {
requestsByCompany.set(companyId, []);
}
requestsByCompany.get(companyId)?.push(request as any);
}
const duration = Date.now() - startTime;
await batchLogger.log(
BatchLogLevel.INFO,
`Retrieved pending requests for ${companies.length} companies (${allRequests.length} total requests) in ${duration}ms`,
{
operation: BatchOperation.BATCH_CREATION,
requestCount: allRequests.length,
duration,
}
);
return requestsByCompany;
}
/**
* Optimized batch status checking for all companies
*/
export async function getInProgressBatchesForAllCompanies(): Promise<
Map<string, any[]>
> {
const startTime = Date.now();
const companies = await companyCache.getActiveCompanies();
if (companies.length === 0) {
return new Map();
}
// Single query for all companies
const allBatches = await prisma.aIBatchRequest.findMany({
where: {
companyId: { in: companies.map((c) => c.id) },
status: {
in: [
AIBatchRequestStatus.IN_PROGRESS,
AIBatchRequestStatus.VALIDATING,
AIBatchRequestStatus.FINALIZING,
],
},
},
select: {
id: true,
companyId: true,
openaiBatchId: true,
status: true,
createdAt: true,
},
});
// Group by company
const batchesByCompany = new Map<string, any[]>();
for (const batch of allBatches) {
if (!batchesByCompany.has(batch.companyId)) {
batchesByCompany.set(batch.companyId, []);
}
batchesByCompany.get(batch.companyId)?.push(batch);
}
const duration = Date.now() - startTime;
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved in-progress batches for ${companies.length} companies (${allBatches.length} total batches) in ${duration}ms`,
{
operation: BatchOperation.BATCH_STATUS_CHECK,
requestCount: allBatches.length,
duration,
}
);
return batchesByCompany;
}
/**
* Optimized completed batch processing for all companies
*/
export async function getCompletedBatchesForAllCompanies(): Promise<
Map<string, any[]>
> {
const startTime = Date.now();
const companies = await companyCache.getActiveCompanies();
if (companies.length === 0) {
return new Map();
}
// Single query for all companies with minimal includes
const allBatches = await prisma.aIBatchRequest.findMany({
where: {
companyId: { in: companies.map((c) => c.id) },
status: AIBatchRequestStatus.COMPLETED,
outputFileId: { not: null },
},
select: {
id: true,
companyId: true,
openaiBatchId: true,
outputFileId: true,
status: true,
createdAt: true,
// Only get request IDs, not full request data
processingRequests: {
select: {
id: true,
sessionId: true,
processingStatus: true,
},
},
},
});
// Group by company
const batchesByCompany = new Map<string, any[]>();
for (const batch of allBatches) {
if (!batchesByCompany.has(batch.companyId)) {
batchesByCompany.set(batch.companyId, []);
}
batchesByCompany.get(batch.companyId)?.push(batch);
}
const duration = Date.now() - startTime;
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved completed batches for ${companies.length} companies (${allBatches.length} total batches) in ${duration}ms`,
{
operation: BatchOperation.BATCH_RESULT_PROCESSING,
requestCount: allBatches.length,
duration,
}
);
return batchesByCompany;
}
/**
* Optimized failed request retry for all companies
*/
export async function getFailedRequestsForAllCompanies(
maxPerCompany = 10
): Promise<Map<string, AIProcessingRequest[]>> {
const startTime = Date.now();
const companies = await companyCache.getActiveCompanies();
if (companies.length === 0) {
return new Map();
}
// Get failed requests for all companies in a single query
const allFailedRequests = await prisma.aIProcessingRequest.findMany({
where: {
session: {
companyId: { in: companies.map((c) => c.id) },
},
processingStatus: AIRequestStatus.PROCESSING_FAILED,
},
include: {
session: {
select: {
id: true,
companyId: true,
_count: { select: { messages: true } },
},
},
},
orderBy: { requestedAt: "asc" },
});
// Group by company and limit per company
const requestsByCompany = new Map<string, AIProcessingRequest[]>();
for (const request of allFailedRequests) {
const companyId = request.session?.companyId;
if (!companyId) continue;
if (!requestsByCompany.has(companyId)) {
requestsByCompany.set(companyId, []);
}
const companyRequests = requestsByCompany.get(companyId)!;
if (companyRequests.length < maxPerCompany) {
companyRequests.push(request as any);
}
}
const duration = Date.now() - startTime;
const totalRequests = Array.from(requestsByCompany.values()).reduce(
(sum, requests) => sum + requests.length,
0
);
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved failed requests for ${companies.length} companies (${totalRequests} total requests) in ${duration}ms`,
{
operation: BatchOperation.INDIVIDUAL_REQUEST_RETRY,
requestCount: totalRequests,
duration,
}
);
return requestsByCompany;
}
/**
* Optimized check for oldest pending request with minimal data
*/
export async function getOldestPendingRequestOptimized(
companyId: string
): Promise<{ requestedAt: Date } | null> {
const startTime = Date.now();
// Only fetch the timestamp we need
const oldestPending = await prisma.aIProcessingRequest.findFirst({
where: {
session: { companyId },
processingStatus: AIRequestStatus.PENDING_BATCHING,
},
select: { requestedAt: true },
orderBy: { requestedAt: "asc" },
});
const duration = Date.now() - startTime;
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved oldest pending request timestamp for company ${companyId} in ${duration}ms`,
{
operation: BatchOperation.SCHEDULER_ACTION,
companyId,
duration,
}
);
return oldestPending;
}
/**
* Batch statistics query optimization
*/
export async function getBatchProcessingStatsOptimized(
companyId?: string
): Promise<any> {
const startTime = Date.now();
const whereClause = companyId ? { companyId } : {};
// Use aggregation instead of loading individual records
const [
totalBatches,
pendingRequests,
inProgressBatches,
completedBatches,
failedRequests,
] = await Promise.all([
prisma.aIBatchRequest.count({ where: whereClause }),
prisma.aIProcessingRequest.count({
where: {
...(companyId && { session: { companyId } }),
processingStatus: AIRequestStatus.PENDING_BATCHING,
},
}),
prisma.aIBatchRequest.count({
where: {
...whereClause,
status: {
in: [
AIBatchRequestStatus.IN_PROGRESS,
AIBatchRequestStatus.VALIDATING,
AIBatchRequestStatus.FINALIZING,
],
},
},
}),
prisma.aIBatchRequest.count({
where: {
...whereClause,
status: AIBatchRequestStatus.COMPLETED,
},
}),
prisma.aIProcessingRequest.count({
where: {
...(companyId && { session: { companyId } }),
processingStatus: AIRequestStatus.PROCESSING_FAILED,
},
}),
]);
const duration = Date.now() - startTime;
const stats = {
totalBatches,
pendingRequests,
inProgressBatches,
completedBatches,
failedRequests,
};
await batchLogger.log(
BatchLogLevel.DEBUG,
`Retrieved batch processing stats ${companyId ? `for company ${companyId}` : "globally"} in ${duration}ms`,
{
operation: BatchOperation.SCHEDULER_ACTION,
companyId,
duration,
metadata: stats,
}
);
return stats;
}
/**
* Utility to invalidate company cache (call when companies are added/removed/status changed)
*/
export function invalidateCompanyCache(): void {
companyCache.invalidate();
}
/**
* Get cache statistics for monitoring
*/
export function getCompanyCacheStats() {
return companyCache.stats();
}
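
A brief sketch of the fan-out pattern these helpers enable: one query per scheduler sweep instead of one per company (the sweep function is illustrative; the import path is assumed from the module name used above):

import {
  getBatchProcessingStatsOptimized,
  getPendingBatchRequestsForAllCompanies,
} from "./lib/batchProcessorOptimized";

async function sweep() {
  // Single query across all active companies, grouped in memory,
  // replacing the per-company N+1 lookup pattern.
  const pendingByCompany = await getPendingBatchRequestsForAllCompanies();
  for (const [companyId, requests] of pendingByCompany) {
    console.log(`${companyId}: ${requests.length} pending requests`);
  }

  // Aggregate counts come from Prisma count() calls, not loaded rows.
  console.log(await getBatchProcessingStatsOptimized());
}

sweep().catch(console.error);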


@@ -8,18 +8,21 @@
*/
import cron, { type ScheduledTask } from "node-cron";
import { batchLogger } from "./batchLogger";
import {
checkBatchStatuses,
createBatchRequest,
getBatchProcessingStats,
getCircuitBreakerStatus,
getPendingBatchRequests,
processCompletedBatches,
retryFailedRequests,
} from "./batchProcessor";
import { prisma } from "./prisma";
import { getSchedulerConfig } from "./schedulerConfig";
/**
- * Configuration for batch scheduler intervals
+ * Configuration for batch scheduler intervals with enhanced error handling
*/
const SCHEDULER_CONFIG = {
// Check for new batches to create every 5 minutes
@@ -28,15 +31,27 @@ const SCHEDULER_CONFIG = {
CHECK_STATUS_INTERVAL: "*/2 * * * *",
// Process completed batches every minute
PROCESS_RESULTS_INTERVAL: "* * * * *",
// Retry failed individual requests every 10 minutes
RETRY_FAILED_INTERVAL: "*/10 * * * *",
// Minimum batch size to trigger creation
MIN_BATCH_SIZE: 10,
// Maximum time to wait before creating a batch (even if under min size)
MAX_WAIT_TIME_MINUTES: 30,
// Maximum consecutive errors before pausing scheduler
MAX_CONSECUTIVE_ERRORS: 5,
// Pause duration when too many errors occur (in milliseconds)
ERROR_PAUSE_DURATION: 15 * 60 * 1000, // 15 minutes
} as const;
let createBatchesTask: ScheduledTask | null = null;
let checkStatusTask: ScheduledTask | null = null;
let processResultsTask: ScheduledTask | null = null;
let retryFailedTask: ScheduledTask | null = null;
// Error tracking for scheduler resilience
let consecutiveErrors = 0;
let lastErrorTime = 0;
let isPaused = false;
/**
* Start the batch processing scheduler
@@ -59,45 +74,44 @@ export function startBatchScheduler(): void {
// Schedule batch creation
createBatchesTask = cron.schedule(
SCHEDULER_CONFIG.CREATE_BATCHES_INTERVAL,
- async () => {
- try {
- await createBatchesForAllCompanies();
- } catch (error) {
- console.error("Error in batch creation scheduler:", error);
- }
- }
+ () => handleSchedulerTask(createBatchesForAllCompanies, "batch creation")
);
// Schedule status checking
checkStatusTask = cron.schedule(
SCHEDULER_CONFIG.CHECK_STATUS_INTERVAL,
async () => {
try {
await checkBatchStatusesForAllCompanies();
} catch (error) {
console.error("Error in batch status checker:", error);
}
}
checkStatusTask = cron.schedule(SCHEDULER_CONFIG.CHECK_STATUS_INTERVAL, () =>
handleSchedulerTask(
checkBatchStatusesForAllCompanies,
"batch status checking"
)
);
// Schedule result processing
processResultsTask = cron.schedule(
SCHEDULER_CONFIG.PROCESS_RESULTS_INTERVAL,
async () => {
try {
await processCompletedBatchesForAllCompanies();
} catch (error) {
console.error("Error in batch result processor:", error);
}
}
() =>
handleSchedulerTask(
processCompletedBatchesForAllCompanies,
"batch result processing"
)
);
// Schedule failed request retry
retryFailedTask = cron.schedule(SCHEDULER_CONFIG.RETRY_FAILED_INTERVAL, () =>
handleSchedulerTask(
retryFailedRequestsForAllCompanies,
"failed request retry"
)
);
// Start all tasks
createBatchesTask.start();
checkStatusTask.start();
processResultsTask.start();
retryFailedTask.start();
console.log("Batch scheduler started successfully");
console.log(
"Batch scheduler started successfully with enhanced error handling"
);
}
/**
@ -124,6 +138,12 @@ export function stopBatchScheduler(): void {
processResultsTask = null;
}
if (retryFailedTask) {
retryFailedTask.stop();
retryFailedTask.destroy();
retryFailedTask = null;
}
console.log("Batch scheduler stopped");
}
@ -285,10 +305,115 @@ export async function forceBatchCreation(companyId: string): Promise<void> {
*/
export function getBatchSchedulerStatus() {
return {
isRunning: !!(createBatchesTask && checkStatusTask && processResultsTask),
isRunning: !!(
createBatchesTask &&
checkStatusTask &&
processResultsTask &&
retryFailedTask
),
createBatchesRunning: !!createBatchesTask,
checkStatusRunning: !!checkStatusTask,
processResultsRunning: !!processResultsTask,
retryFailedRunning: !!retryFailedTask,
isPaused,
consecutiveErrors,
lastErrorTime: lastErrorTime ? new Date(lastErrorTime) : null,
circuitBreakers: getCircuitBreakerStatus(),
config: SCHEDULER_CONFIG,
};
}
/**
* Handle scheduler task execution with error tracking and recovery
*/
async function handleSchedulerTask(
taskFunction: () => Promise<void>,
taskName: string
): Promise<void> {
// Check if scheduler is paused due to too many errors
if (isPaused) {
const now = Date.now();
if (now - lastErrorTime >= SCHEDULER_CONFIG.ERROR_PAUSE_DURATION) {
console.log(`Resuming scheduler after error pause: ${taskName}`);
isPaused = false;
consecutiveErrors = 0;
} else {
console.log(`Scheduler paused due to errors, skipping: ${taskName}`);
return;
}
}
const startTime = Date.now();
let successCount = 0;
let errorCount = 0;
try {
await taskFunction();
successCount = 1;
// Reset error counter on success
if (consecutiveErrors > 0) {
console.log(
`Scheduler recovered after ${consecutiveErrors} consecutive errors: ${taskName}`
);
consecutiveErrors = 0;
}
} catch (error) {
consecutiveErrors++;
lastErrorTime = Date.now();
errorCount = 1;
console.error(
`Error in ${taskName} (attempt ${consecutiveErrors}):`,
error
);
// Pause scheduler if too many consecutive errors
if (consecutiveErrors >= SCHEDULER_CONFIG.MAX_CONSECUTIVE_ERRORS) {
isPaused = true;
console.error(
`Pausing scheduler for ${SCHEDULER_CONFIG.ERROR_PAUSE_DURATION / 1000 / 60} minutes due to ${consecutiveErrors} consecutive errors`
);
}
} finally {
const duration = Date.now() - startTime;
await batchLogger.logScheduler(
taskName,
duration,
successCount,
errorCount,
errorCount > 0
? new Error(`Scheduler task ${taskName} failed`)
: undefined
);
}
}
/**
* Retry failed individual requests for all companies
*/
async function retryFailedRequestsForAllCompanies(): Promise<void> {
try {
const companies = await prisma.company.findMany({
where: { status: "ACTIVE" },
select: { id: true },
});
for (const company of companies) {
await retryFailedRequests(company.id);
}
} catch (error) {
console.error("Failed to retry failed requests:", error);
throw error; // Re-throw to trigger error handling
}
}
/**
* Force resume scheduler (for manual recovery)
*/
export function forceResumeScheduler(): void {
isPaused = false;
consecutiveErrors = 0;
lastErrorTime = 0;
console.log("Scheduler manually resumed, error counters reset");
}
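
A sketch of manual recovery wired into an admin endpoint, assuming the scheduler module lives at lib/batchScheduler.ts; the route path and handler are hypothetical, but the two exported functions match the signatures above.

import { NextResponse } from "next/server";
import { forceResumeScheduler, getBatchSchedulerStatus } from "./batchScheduler";

// Hypothetical app/api/admin/scheduler/resume/route.ts handler.
export async function POST(): Promise<NextResponse> {
  const before = getBatchSchedulerStatus();
  if (before.isPaused) {
    // Clears isPaused and the consecutive-error counter.
    forceResumeScheduler();
  }
  return NextResponse.json({ before, after: getBatchSchedulerStatus() });
}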

View File

@ -0,0 +1,516 @@
/**
* Optimized OpenAI Batch Processing Scheduler
*
* This optimized version reduces database load through:
* - Batch operations across all companies
* - Company caching to eliminate repeated lookups
* - Parallel processing with better error isolation
* - More efficient query patterns
*/
import cron, { type ScheduledTask } from "node-cron";
import { BatchLogLevel, BatchOperation, batchLogger } from "./batchLogger";
import {
checkBatchStatuses,
createBatchRequest,
getCircuitBreakerStatus,
processCompletedBatches,
retryFailedRequests,
} from "./batchProcessor";
import {
getCompletedBatchesForAllCompanies,
getFailedRequestsForAllCompanies,
getInProgressBatchesForAllCompanies,
getOldestPendingRequestOptimized,
getPendingBatchRequestsForAllCompanies,
} from "./batchProcessorOptimized";
import { getSchedulerConfig } from "./schedulerConfig";
/**
* Enhanced configuration with optimization flags
*/
const SCHEDULER_CONFIG = {
// Check for new batches to create every 5 minutes
CREATE_BATCHES_INTERVAL: "*/5 * * * *",
// Check batch statuses every 2 minutes
CHECK_STATUS_INTERVAL: "*/2 * * * *",
// Process completed batches every minute
PROCESS_RESULTS_INTERVAL: "* * * * *",
// Retry failed individual requests every 10 minutes
RETRY_FAILED_INTERVAL: "*/10 * * * *",
// Minimum batch size to trigger creation
MIN_BATCH_SIZE: 10,
// Maximum time to wait before creating a batch (even if under min size)
MAX_WAIT_TIME_MINUTES: 30,
// Maximum consecutive errors before pausing scheduler
MAX_CONSECUTIVE_ERRORS: 5,
// Pause duration when too many errors occur (in milliseconds)
ERROR_PAUSE_DURATION: 15 * 60 * 1000, // 15 minutes
// Performance optimization flags
USE_BATCH_OPERATIONS: true,
PARALLEL_COMPANY_PROCESSING: true,
MAX_CONCURRENT_COMPANIES: 5,
} as const;
let createBatchesTask: ScheduledTask | null = null;
let checkStatusTask: ScheduledTask | null = null;
let processResultsTask: ScheduledTask | null = null;
let retryFailedTask: ScheduledTask | null = null;
// Enhanced error tracking with performance monitoring
let consecutiveErrors = 0;
let lastErrorTime = 0;
let isPaused = false;
let totalOperationTime = 0;
let operationCount = 0;
/**
* Start the optimized batch processing scheduler
*/
export function startOptimizedBatchScheduler(): void {
const config = getSchedulerConfig();
if (!config.enabled) {
console.log("Batch scheduler disabled by configuration");
return;
}
if (!process.env.OPENAI_API_KEY) {
console.log("Batch scheduler disabled: OPENAI_API_KEY not configured");
return;
}
console.log("Starting Optimized OpenAI Batch Processing Scheduler...");
// Schedule optimized batch creation
createBatchesTask = cron.schedule(
SCHEDULER_CONFIG.CREATE_BATCHES_INTERVAL,
() =>
handleSchedulerTask(createBatchesOptimized, "optimized batch creation")
);
// Schedule optimized status checking
checkStatusTask = cron.schedule(SCHEDULER_CONFIG.CHECK_STATUS_INTERVAL, () =>
handleSchedulerTask(
checkBatchStatusesOptimized,
"optimized batch status checking"
)
);
// Schedule optimized result processing
processResultsTask = cron.schedule(
SCHEDULER_CONFIG.PROCESS_RESULTS_INTERVAL,
() =>
handleSchedulerTask(
processCompletedBatchesOptimized,
"optimized batch result processing"
)
);
// Schedule optimized failed request retry
retryFailedTask = cron.schedule(SCHEDULER_CONFIG.RETRY_FAILED_INTERVAL, () =>
handleSchedulerTask(
retryFailedRequestsOptimized,
"optimized failed request retry"
)
);
// Start all tasks
createBatchesTask.start();
checkStatusTask.start();
processResultsTask.start();
retryFailedTask.start();
console.log("Optimized batch scheduler started successfully");
}
/**
* Stop the optimized batch processing scheduler
*/
export function stopOptimizedBatchScheduler(): void {
console.log("Stopping optimized batch scheduler...");
const tasks = [
{ task: createBatchesTask, name: "createBatchesTask" },
{ task: checkStatusTask, name: "checkStatusTask" },
{ task: processResultsTask, name: "processResultsTask" },
{ task: retryFailedTask, name: "retryFailedTask" },
];
  for (const { task } of tasks) {
if (task) {
task.stop();
task.destroy();
}
}
createBatchesTask = null;
checkStatusTask = null;
processResultsTask = null;
retryFailedTask = null;
console.log("Optimized batch scheduler stopped");
}
/**
* Optimized batch creation for all companies
*/
async function createBatchesOptimized(): Promise<void> {
const startTime = Date.now();
if (SCHEDULER_CONFIG.USE_BATCH_OPERATIONS) {
// Single query to get pending requests for all companies
const pendingRequestsByCompany =
await getPendingBatchRequestsForAllCompanies();
if (pendingRequestsByCompany.size === 0) {
await batchLogger.log(
        BatchLogLevel.DEBUG,
"No pending requests found across all companies",
{ operation: BatchOperation.BATCH_CREATION }
);
return;
}
// Process companies in parallel batches
const companyIds = Array.from(pendingRequestsByCompany.keys());
for (
let i = 0;
i < companyIds.length;
i += SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
) {
const batch = companyIds.slice(
i,
i + SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
);
      // Await each chunk before starting the next so the
      // MAX_CONCURRENT_COMPANIES cap is actually enforced;
      // Promise.allSettled still isolates per-company failures.
      await Promise.allSettled(
batch.map(async (companyId) => {
const pendingRequests = pendingRequestsByCompany.get(companyId) || [];
if (pendingRequests.length === 0) return;
const shouldCreate = await shouldCreateBatchForCompanyOptimized(
companyId,
pendingRequests.length
);
if (shouldCreate) {
await createBatchRequest(companyId, pendingRequests);
}
})
);
    }
} else {
// Fallback to original sequential processing
console.warn("Using fallback sequential processing for batch creation");
// Implementation would call original functions
}
const duration = Date.now() - startTime;
updatePerformanceMetrics(duration);
}
/**
* Optimized batch status checking for all companies
*/
async function checkBatchStatusesOptimized(): Promise<void> {
const startTime = Date.now();
if (SCHEDULER_CONFIG.USE_BATCH_OPERATIONS) {
// Single query to get in-progress batches for all companies
const batchesByCompany = await getInProgressBatchesForAllCompanies();
if (batchesByCompany.size === 0) {
return;
}
// Process companies in parallel
const companyIds = Array.from(batchesByCompany.keys());
for (
let i = 0;
i < companyIds.length;
i += SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
) {
const batch = companyIds.slice(
i,
i + SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
);
      await Promise.allSettled(
batch.map(async (companyId) => {
await checkBatchStatuses(companyId);
})
);
    }
}
const duration = Date.now() - startTime;
updatePerformanceMetrics(duration);
}
/**
* Optimized completed batch processing for all companies
*/
async function processCompletedBatchesOptimized(): Promise<void> {
const startTime = Date.now();
if (SCHEDULER_CONFIG.USE_BATCH_OPERATIONS) {
// Single query to get completed batches for all companies
const batchesByCompany = await getCompletedBatchesForAllCompanies();
if (batchesByCompany.size === 0) {
return;
}
// Process companies in parallel
const companyIds = Array.from(batchesByCompany.keys());
for (
let i = 0;
i < companyIds.length;
i += SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
) {
const batch = companyIds.slice(
i,
i + SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
);
      await Promise.allSettled(
batch.map(async (companyId) => {
await processCompletedBatches(companyId);
})
);
    }
}
const duration = Date.now() - startTime;
updatePerformanceMetrics(duration);
}
/**
* Optimized failed request retry for all companies
*/
async function retryFailedRequestsOptimized(): Promise<void> {
const startTime = Date.now();
if (SCHEDULER_CONFIG.USE_BATCH_OPERATIONS) {
// Single query to get failed requests for all companies
const failedRequestsByCompany = await getFailedRequestsForAllCompanies();
if (failedRequestsByCompany.size === 0) {
return;
}
// Process companies in parallel
const companyIds = Array.from(failedRequestsByCompany.keys());
for (
let i = 0;
i < companyIds.length;
i += SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
) {
const batch = companyIds.slice(
i,
i + SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES
);
      await Promise.allSettled(
batch.map(async (companyId) => {
await retryFailedRequests(companyId);
})
);
    }
}
const duration = Date.now() - startTime;
updatePerformanceMetrics(duration);
}
/**
* Optimized version of shouldCreateBatchForCompany
*/
async function shouldCreateBatchForCompanyOptimized(
companyId: string,
pendingCount: number
): Promise<boolean> {
// Always create if we have enough requests
if (pendingCount >= SCHEDULER_CONFIG.MIN_BATCH_SIZE) {
return true;
}
// Check if oldest pending request is old enough (optimized query)
const oldestPending = await getOldestPendingRequestOptimized(companyId);
if (!oldestPending) {
return false;
}
const waitTimeMs = Date.now() - oldestPending.requestedAt.getTime();
const maxWaitTimeMs = SCHEDULER_CONFIG.MAX_WAIT_TIME_MINUTES * 60 * 1000;
return waitTimeMs >= maxWaitTimeMs;
}
/**
* Enhanced scheduler task handler with performance monitoring
*/
async function handleSchedulerTask(
taskFunction: () => Promise<void>,
taskName: string
): Promise<void> {
// Check if scheduler is paused due to too many errors
if (isPaused) {
const now = Date.now();
if (now - lastErrorTime >= SCHEDULER_CONFIG.ERROR_PAUSE_DURATION) {
console.log(
`Resuming optimized scheduler after error pause: ${taskName}`
);
isPaused = false;
consecutiveErrors = 0;
} else {
console.log(
`Optimized scheduler paused due to errors, skipping: ${taskName}`
);
return;
}
}
const startTime = Date.now();
let successCount = 0;
let errorCount = 0;
try {
await taskFunction();
successCount = 1;
// Reset error counter on success
if (consecutiveErrors > 0) {
console.log(
`Optimized scheduler recovered after ${consecutiveErrors} consecutive errors: ${taskName}`
);
consecutiveErrors = 0;
}
} catch (error) {
consecutiveErrors++;
lastErrorTime = Date.now();
errorCount = 1;
console.error(
`Error in optimized ${taskName} (attempt ${consecutiveErrors}):`,
error
);
// Pause scheduler if too many consecutive errors
if (consecutiveErrors >= SCHEDULER_CONFIG.MAX_CONSECUTIVE_ERRORS) {
isPaused = true;
console.error(
`Pausing optimized scheduler for ${SCHEDULER_CONFIG.ERROR_PAUSE_DURATION / 1000 / 60} minutes due to ${consecutiveErrors} consecutive errors`
);
}
} finally {
const duration = Date.now() - startTime;
await batchLogger.logScheduler(
`optimized_${taskName}`,
duration,
successCount,
errorCount,
errorCount > 0
? new Error(`Optimized scheduler task ${taskName} failed`)
: undefined
);
updatePerformanceMetrics(duration);
}
}
/**
* Track performance metrics
*/
function updatePerformanceMetrics(duration: number): void {
totalOperationTime += duration;
operationCount++;
}
/**
* Get optimized scheduler status with performance metrics
*/
export function getOptimizedBatchSchedulerStatus() {
const baseStatus = {
isRunning: !!(
createBatchesTask &&
checkStatusTask &&
processResultsTask &&
retryFailedTask
),
createBatchesRunning: !!createBatchesTask,
checkStatusRunning: !!checkStatusTask,
processResultsRunning: !!processResultsTask,
retryFailedRunning: !!retryFailedTask,
isPaused,
consecutiveErrors,
lastErrorTime: lastErrorTime ? new Date(lastErrorTime) : null,
circuitBreakers: getCircuitBreakerStatus(),
config: SCHEDULER_CONFIG,
};
// Add performance metrics
const performanceMetrics = {
averageOperationTime:
operationCount > 0 ? totalOperationTime / operationCount : 0,
totalOperations: operationCount,
totalOperationTime,
optimizationsEnabled: {
batchOperations: SCHEDULER_CONFIG.USE_BATCH_OPERATIONS,
parallelProcessing: SCHEDULER_CONFIG.PARALLEL_COMPANY_PROCESSING,
maxConcurrentCompanies: SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES,
},
};
return {
...baseStatus,
performanceMetrics,
isOptimized: true,
};
}
/**
* Force resume optimized scheduler (for manual recovery)
*/
export function forceResumeOptimizedScheduler(): void {
isPaused = false;
consecutiveErrors = 0;
lastErrorTime = 0;
console.log("Optimized scheduler manually resumed, error counters reset");
}
/**
* Reset performance metrics
*/
export function resetPerformanceMetrics(): void {
totalOperationTime = 0;
operationCount = 0;
console.log("Optimized scheduler performance metrics reset");
}
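
The four task bodies above share an identical chunk-and-settle loop; a small generic helper (a sketch only, not part of this commit) could factor it out so each task body shrinks to a single call.

// Sketch only: a possible refactor of the repeated chunking loop.
async function processInChunks<T>(
  items: T[],
  chunkSize: number,
  worker: (item: T) => Promise<void>
): Promise<void> {
  for (let i = 0; i < items.length; i += chunkSize) {
    // Await each chunk so at most `chunkSize` workers run at once;
    // allSettled keeps one failing item from aborting the rest.
    await Promise.allSettled(items.slice(i, i + chunkSize).map(worker));
  }
}

// e.g. inside checkBatchStatusesOptimized:
//   await processInChunks(
//     Array.from(batchesByCompany.keys()),
//     SCHEDULER_CONFIG.MAX_CONCURRENT_COMPANIES,
//     (companyId) => checkBatchStatuses(companyId)
//   );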

386
lib/csp-monitoring.ts Normal file
View File

@ -0,0 +1,386 @@
import {
type CSPViolationReport,
detectCSPBypass,
parseCSPViolation,
} from "./csp";
export interface CSPMetrics {
totalViolations: number;
criticalViolations: number;
bypassAttempts: number;
topViolatedDirectives: Array<{ directive: string; count: number }>;
topBlockedUris: Array<{ uri: string; count: number }>;
violationTrends: Array<{ date: string; count: number }>;
}
export interface CSPAlert {
id: string;
timestamp: Date;
severity: "low" | "medium" | "high" | "critical";
type: "violation" | "bypass_attempt" | "policy_change" | "threshold_exceeded";
message: string;
metadata: Record<string, any>;
}
export class CSPMonitoringService {
private violations: Array<{
timestamp: Date;
ip: string;
userAgent?: string;
violation: ReturnType<typeof parseCSPViolation>;
bypassDetection: ReturnType<typeof detectCSPBypass>;
originalReport: CSPViolationReport;
}> = [];
private alerts: CSPAlert[] = [];
private alertThresholds = {
violationsPerMinute: 10,
bypassAttemptsPerHour: 5,
criticalViolationsPerHour: 3,
};
/**
* Process a CSP violation report
*/
async processViolation(
report: CSPViolationReport,
ip: string,
userAgent?: string
): Promise<{
shouldAlert: boolean;
alertLevel: "low" | "medium" | "high" | "critical";
recommendations: string[];
}> {
const violation = parseCSPViolation(report);
const bypassDetection = detectCSPBypass(
report["csp-report"]["blocked-uri"] +
" " +
(report["csp-report"]["script-sample"] || "")
);
// Store violation
this.violations.push({
timestamp: new Date(),
ip,
userAgent,
violation,
bypassDetection,
originalReport: report,
});
// Generate recommendations
const recommendations = this.generateRecommendations(
violation,
bypassDetection
);
// Determine alert level
const alertLevel = this.determineAlertLevel(violation, bypassDetection);
// Check if we should alert
const shouldAlert = await this.shouldTriggerAlert(
violation,
bypassDetection
);
if (shouldAlert) {
await this.createAlert({
severity: alertLevel,
type: bypassDetection.isDetected ? "bypass_attempt" : "violation",
message: this.formatAlertMessage(violation, bypassDetection),
metadata: {
directive: violation.directive,
blockedUri: violation.blockedUri,
ip,
userAgent,
bypassRisk: bypassDetection.riskLevel,
},
});
}
return {
shouldAlert,
alertLevel,
recommendations,
};
}
/**
* Get CSP violation metrics
*/
getMetrics(timeRange: { start: Date; end: Date }): CSPMetrics {
const filteredViolations = this.violations.filter(
(v) => v.timestamp >= timeRange.start && v.timestamp <= timeRange.end
);
// Count violations by directive
const directiveCounts = new Map<string, number>();
const uriCounts = new Map<string, number>();
const dailyCounts = new Map<string, number>();
for (const v of filteredViolations) {
// Directive counts
const directive = v.violation.directive;
directiveCounts.set(directive, (directiveCounts.get(directive) || 0) + 1);
// URI counts
const uri = v.violation.blockedUri;
uriCounts.set(uri, (uriCounts.get(uri) || 0) + 1);
// Daily counts
const dateKey = v.timestamp.toISOString().split("T")[0];
dailyCounts.set(dateKey, (dailyCounts.get(dateKey) || 0) + 1);
}
return {
totalViolations: filteredViolations.length,
criticalViolations: filteredViolations.filter(
(v) => v.violation.isCritical
).length,
bypassAttempts: filteredViolations.filter(
(v) => v.bypassDetection.isDetected
).length,
topViolatedDirectives: Array.from(directiveCounts.entries())
.map(([directive, count]) => ({ directive, count }))
.sort((a, b) => b.count - a.count)
.slice(0, 10),
topBlockedUris: Array.from(uriCounts.entries())
.map(([uri, count]) => ({ uri, count }))
.sort((a, b) => b.count - a.count)
.slice(0, 10),
violationTrends: Array.from(dailyCounts.entries())
.map(([date, count]) => ({ date, count }))
.sort((a, b) => a.date.localeCompare(b.date)),
};
}
/**
* Generate policy recommendations based on violations
*/
generatePolicyRecommendations(timeRange: { start: Date; end: Date }): {
allowlist: string[];
tighten: string[];
investigate: string[];
} {
const metrics = this.getMetrics(timeRange);
const allowlist: string[] = [];
const tighten: string[] = [];
const investigate: string[] = [];
// Analyze top blocked URIs for potential allowlisting
for (const { uri, count } of metrics.topBlockedUris) {
if (count > 5 && this.isLegitimateResource(uri)) {
allowlist.push(`Consider allowlisting: ${uri} (${count} violations)`);
} else if (count > 10) {
investigate.push(
`High volume blocking: ${uri} (${count} violations) - investigate if legitimate`
);
}
}
// Analyze directives for tightening
for (const { directive, count } of metrics.topViolatedDirectives) {
if (directive.includes("'unsafe-")) {
tighten.push(
`${directive} has ${count} violations - consider removing unsafe directives`
);
} else if (count > 20) {
tighten.push(
`${directive} has high violation count (${count}) - review necessity`
);
}
}
return { allowlist, tighten, investigate };
}
/**
* Export violations for external analysis
*/
exportViolations(format: "json" | "csv" = "json"): string {
if (format === "csv") {
const headers = [
"timestamp",
"ip",
"userAgent",
"directive",
"blockedUri",
"sourceFile",
"lineNumber",
"isCritical",
"isInlineViolation",
"bypassDetected",
"riskLevel",
].join(",");
const rows = this.violations.map((v) =>
[
v.timestamp.toISOString(),
v.ip,
v.userAgent || "",
v.violation.directive,
v.violation.blockedUri,
v.violation.sourceFile || "",
v.violation.lineNumber || "",
v.violation.isCritical.toString(),
v.violation.isInlineViolation.toString(),
v.bypassDetection.isDetected.toString(),
v.bypassDetection.riskLevel,
]
.map((field) => `"${field}"`)
.join(",")
);
return [headers, ...rows].join("\n");
}
return JSON.stringify(this.violations, null, 2);
}
private generateRecommendations(
violation: ReturnType<typeof parseCSPViolation>,
bypassDetection: ReturnType<typeof detectCSPBypass>
): string[] {
const recommendations: string[] = [];
if (violation.isInlineViolation) {
recommendations.push("Consider using nonce-based CSP for inline content");
}
if (violation.directive.startsWith("script-src")) {
recommendations.push(
"Review script sources and consider using 'strict-dynamic'"
);
}
if (bypassDetection.isDetected) {
recommendations.push(
"Potential security threat detected - investigate immediately"
);
if (bypassDetection.riskLevel === "high") {
recommendations.push(
"High-risk bypass attempt - consider blocking source IP"
);
}
}
if (violation.blockedUri.includes("data:")) {
recommendations.push(
"Review data URI usage - limit to necessary resources only"
);
}
return recommendations;
}
private determineAlertLevel(
violation: ReturnType<typeof parseCSPViolation>,
bypassDetection: ReturnType<typeof detectCSPBypass>
): "low" | "medium" | "high" | "critical" {
if (bypassDetection.isDetected && bypassDetection.riskLevel === "high") {
return "critical";
}
if (violation.isCritical || bypassDetection.riskLevel === "high") {
return "high";
}
if (bypassDetection.isDetected || violation.isInlineViolation) {
return "medium";
}
return "low";
}
private async shouldTriggerAlert(
violation: ReturnType<typeof parseCSPViolation>,
bypassDetection: ReturnType<typeof detectCSPBypass>
): Promise<boolean> {
// Always alert on critical violations or high-risk bypass attempts
if (violation.isCritical || bypassDetection.riskLevel === "high") {
return true;
}
// Check rate-based thresholds
const now = new Date();
const oneMinuteAgo = new Date(now.getTime() - 60 * 1000);
const oneHourAgo = new Date(now.getTime() - 60 * 60 * 1000);
const recentViolations = this.violations.filter(
(v) => v.timestamp >= oneMinuteAgo
);
const recentBypassAttempts = this.violations.filter(
(v) => v.timestamp >= oneHourAgo && v.bypassDetection.isDetected
);
const recentCriticalViolations = this.violations.filter(
(v) => v.timestamp >= oneHourAgo && v.violation.isCritical
);
return (
recentViolations.length >= this.alertThresholds.violationsPerMinute ||
recentBypassAttempts.length >=
this.alertThresholds.bypassAttemptsPerHour ||
recentCriticalViolations.length >=
this.alertThresholds.criticalViolationsPerHour
);
}
private async createAlert(
alertData: Omit<CSPAlert, "id" | "timestamp">
): Promise<void> {
const alert: CSPAlert = {
id: crypto.randomUUID(),
timestamp: new Date(),
...alertData,
};
this.alerts.push(alert);
// In production, you would send this to your monitoring service
console.error(
`🚨 CSP Alert [${alert.severity.toUpperCase()}]: ${alert.message}`
);
// You could integrate with services like:
// - Slack/Discord webhooks
// - PagerDuty
// - Email alerts
// - Monitoring dashboards (DataDog, New Relic, etc.)
}
private formatAlertMessage(
violation: ReturnType<typeof parseCSPViolation>,
bypassDetection: ReturnType<typeof detectCSPBypass>
): string {
if (bypassDetection.isDetected) {
return `CSP bypass attempt detected: ${violation.directive} blocked ${violation.blockedUri} (Risk: ${bypassDetection.riskLevel})`;
}
return `CSP violation: ${violation.directive} blocked ${violation.blockedUri}${violation.isCritical ? " (CRITICAL)" : ""}`;
}
private isLegitimateResource(uri: string): boolean {
// Simple heuristics to identify potentially legitimate resources
const legitimatePatterns = [
/^https:\/\/[a-zA-Z0-9.-]+\.(googleapis|gstatic|cloudflare|jsdelivr|unpkg)\.com/,
/^https:\/\/[a-zA-Z0-9.-]+\.(png|jpg|jpeg|gif|svg|webp|ico)$/,
/^https:\/\/fonts\.(googleapis|gstatic)\.com/,
/^https:\/\/api\.[a-zA-Z0-9.-]+\.com/,
];
return legitimatePatterns.some((pattern) => pattern.test(uri));
}
/**
* Clean up old violations to prevent memory leaks
*/
cleanupOldViolations(maxAge: number = 7 * 24 * 60 * 60 * 1000): void {
const cutoff = new Date(Date.now() - maxAge);
this.violations = this.violations.filter((v) => v.timestamp >= cutoff);
this.alerts = this.alerts.filter((a) => a.timestamp >= cutoff);
}
}
// Singleton instance for application use
export const cspMonitoring = new CSPMonitoringService();
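
A sketch of wiring the singleton into a report endpoint. The route path is hypothetical; `extractClientIP` is assumed to come from lib/rateLimiter as it does in the audit logger, and the other signatures match the service above.

import { type NextRequest, NextResponse } from "next/server";
import type { CSPViolationReport } from "./csp";
import { cspMonitoring } from "./csp-monitoring";
import { extractClientIP } from "./rateLimiter";

// Hypothetical app/api/csp-report/route.ts handler.
export async function POST(request: NextRequest): Promise<NextResponse> {
  const report = (await request.json()) as CSPViolationReport;
  const { shouldAlert, alertLevel, recommendations } =
    await cspMonitoring.processViolation(
      report,
      extractClientIP(request),
      request.headers.get("user-agent") || undefined
    );
  if (shouldAlert) {
    console.warn(`CSP ${alertLevel} alert`, recommendations);
  }
  // Browsers ignore the body; 204 just acknowledges receipt.
  return new NextResponse(null, { status: 204 });
}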

509
lib/csp.ts Normal file
View File

@ -0,0 +1,509 @@
import crypto from "node:crypto";
import { type NextRequest, NextResponse } from "next/server";
export interface CSPConfig {
nonce?: string;
isDevelopment?: boolean;
reportUri?: string;
enforceMode?: boolean;
strictMode?: boolean;
allowedExternalDomains?: string[];
reportingLevel?: "none" | "violations" | "all";
}
export interface CSPViolationReport {
"csp-report": {
"document-uri": string;
referrer: string;
"violated-directive": string;
"original-policy": string;
"blocked-uri": string;
"source-file"?: string;
"line-number"?: number;
"column-number"?: number;
"script-sample"?: string;
};
}
/**
* Generate a cryptographically secure nonce for CSP
*/
export function generateNonce(): string {
return crypto.randomBytes(16).toString("base64");
}
/**
* Build Content Security Policy header value based on configuration
*/
export function buildCSP(config: CSPConfig = {}): string {
  const {
    nonce,
    isDevelopment = false,
    reportUri,
    enforceMode: _enforceMode = true,
    strictMode = false,
    allowedExternalDomains = [],
    reportingLevel: _reportingLevel = "violations",
  } = config;
// Base directives for all environments
const baseDirectives = {
"default-src": ["'self'"],
"base-uri": ["'self'"],
"form-action": ["'self'"],
"frame-ancestors": ["'none'"],
"object-src": ["'none'"],
"upgrade-insecure-requests": true,
};
// Script sources - more restrictive in production
const scriptSrc = isDevelopment
? ["'self'", "'unsafe-eval'", "'unsafe-inline'"]
: nonce
? ["'self'", `'nonce-${nonce}'`, "'strict-dynamic'"]
: ["'self'"];
// Style sources - use nonce in production when available
const styleSrc = nonce
? ["'self'", `'nonce-${nonce}'`]
: ["'self'", "'unsafe-inline'"]; // Fallback for TailwindCSS
// Image sources - allow self, data URIs, and specific trusted domains
const imgSrc = [
"'self'",
"data:",
"https://schema.org", // For structured data images
"https://livedash.notso.ai", // Application domain
"https://*.basemaps.cartocdn.com", // Leaflet map tiles
"https://*.openstreetmap.org", // OpenStreetMap tiles
    ...allowedExternalDomains.filter((domain) => domain.startsWith("https://")),
].filter(Boolean);
// Font sources - restrict to self and data URIs
const fontSrc = ["'self'", "data:"];
// Connect sources - API endpoints and trusted domains
const connectSrc = isDevelopment
? ["'self'", "https:", "wss:", "ws:"] // Allow broader sources in dev for HMR
: strictMode
? [
"'self'",
"https://api.openai.com", // OpenAI API
"https://livedash.notso.ai", // Application API
...allowedExternalDomains.filter(
(domain) =>
domain.startsWith("https://") || domain.startsWith("wss://")
),
].filter(Boolean)
: [
"'self'",
"https://api.openai.com", // OpenAI API
"https://livedash.notso.ai", // Application API
"https:", // Allow all HTTPS in non-strict mode
];
// Media sources - restrict to self
const mediaSrc = ["'self'"];
// Worker sources - restrict to self
const workerSrc = ["'self'"];
// Child sources - restrict to self
const childSrc = ["'self'"];
// Manifest sources - restrict to self
const manifestSrc = ["'self'"];
// Build the directive object
  const directives: Record<string, string[] | boolean> = {
...baseDirectives,
"script-src": scriptSrc,
"style-src": styleSrc,
"img-src": imgSrc,
"font-src": fontSrc,
"connect-src": connectSrc,
"media-src": mediaSrc,
"worker-src": workerSrc,
"child-src": childSrc,
"manifest-src": manifestSrc,
};
// Add report URI if provided
if (reportUri) {
directives["report-uri"] = [reportUri];
directives["report-to"] = ["csp-endpoint"];
}
// Convert directives to CSP string
const cspString = Object.entries(directives)
.map(([directive, value]) => {
if (value === true) return directive;
if (Array.isArray(value)) return `${directive} ${value.join(" ")}`;
return `${directive} ${value}`;
})
.join("; ");
return cspString;
}
/**
* Create CSP middleware for Next.js
*/
export function createCSPMiddleware(config: CSPConfig = {}) {
return (_request: NextRequest) => {
const nonce = generateNonce();
const isDevelopment = process.env.NODE_ENV === "development";
const csp = buildCSP({
...config,
nonce,
isDevelopment,
});
const response = NextResponse.next();
// Set CSP header
response.headers.set("Content-Security-Policy", csp);
// Store nonce for use in components
response.headers.set("X-Nonce", nonce);
return response;
};
}
/**
* Enhanced CSP validation with security best practices
*/
export function validateCSP(
csp: string,
options: { strictMode?: boolean } = {}
): {
isValid: boolean;
warnings: string[];
errors: string[];
securityScore: number;
recommendations: string[];
} {
const warnings: string[] = [];
const errors: string[] = [];
const recommendations: string[] = [];
const { strictMode = false } = options;
let securityScore = 100;
// Check for unsafe directives
if (csp.includes("'unsafe-inline'") && !csp.includes("'nonce-")) {
warnings.push("Using 'unsafe-inline' without nonce is less secure");
securityScore -= 15;
recommendations.push(
"Implement nonce-based CSP for inline scripts and styles"
);
}
if (csp.includes("'unsafe-eval'")) {
if (strictMode) {
errors.push("'unsafe-eval' is not allowed in strict mode");
securityScore -= 25;
} else {
warnings.push("'unsafe-eval' allows dangerous code execution");
securityScore -= 10;
}
}
// Check for overly permissive directives (but exclude font wildcards and subdomain wildcards)
const hasProblematicWildcards =
csp.includes(" *") ||
csp.includes("*://") ||
(csp.includes("*") && !csp.includes("*.") && !csp.includes("wss: ws:"));
if (hasProblematicWildcards) {
errors.push("Wildcard (*) sources are not recommended");
securityScore -= 30;
recommendations.push("Replace wildcards with specific trusted domains");
}
if (
csp.includes("data:") &&
!csp.includes("img-src") &&
!csp.includes("font-src")
) {
warnings.push("data: URIs should be limited to specific directives");
securityScore -= 5;
}
// Check for HTTPS upgrade
if (!csp.includes("upgrade-insecure-requests")) {
warnings.push("Missing HTTPS upgrade directive");
securityScore -= 10;
recommendations.push("Add 'upgrade-insecure-requests' directive");
}
// Check for frame protection
if (!csp.includes("frame-ancestors")) {
warnings.push("Missing frame-ancestors directive");
securityScore -= 15;
recommendations.push(
"Add 'frame-ancestors 'none'' to prevent clickjacking"
);
}
// Check required directives
const requiredDirectives = [
"default-src",
"script-src",
"style-src",
"object-src",
"base-uri",
"form-action",
];
for (const directive of requiredDirectives) {
if (!csp.includes(directive)) {
errors.push(`Missing required directive: ${directive}`);
securityScore -= 20;
}
}
// Check for modern CSP features
if (csp.includes("'nonce-") && !csp.includes("'strict-dynamic'")) {
recommendations.push(
"Consider adding 'strict-dynamic' for better nonce-based security"
);
}
// Check reporting setup
if (!csp.includes("report-uri") && !csp.includes("report-to")) {
warnings.push("Missing CSP violation reporting");
securityScore -= 5;
recommendations.push("Add CSP violation reporting for monitoring");
}
// Strict mode additional checks
if (strictMode) {
if (csp.includes("https:") && !csp.includes("connect-src")) {
warnings.push("Broad HTTPS allowlist detected in strict mode");
securityScore -= 10;
recommendations.push("Replace 'https:' with specific trusted domains");
}
}
return {
isValid: errors.length === 0,
warnings,
errors,
securityScore: Math.max(0, securityScore),
recommendations,
};
}
/**
* Parse CSP violation report
*/
export function parseCSPViolation(report: CSPViolationReport): {
directive: string;
blockedUri: string;
sourceFile?: string;
lineNumber?: number;
isInlineViolation: boolean;
isCritical: boolean;
} {
const cspReport = report["csp-report"];
const isInlineViolation =
cspReport["blocked-uri"] === "inline" ||
cspReport["blocked-uri"] === "eval";
const isCritical =
cspReport["violated-directive"].startsWith("script-src") ||
cspReport["violated-directive"].startsWith("object-src");
return {
directive: cspReport["violated-directive"],
blockedUri: cspReport["blocked-uri"],
sourceFile: cspReport["source-file"],
lineNumber: cspReport["line-number"],
isInlineViolation,
isCritical,
};
}
/**
* CSP bypass detection patterns
*/
export const CSP_BYPASS_PATTERNS = [
// Common XSS bypass attempts
/javascript:/i,
/data:text\/html/i,
/vbscript:/i,
/livescript:/i,
// Base64 encoded attempts
/data:.*base64.*script/i,
/data:text\/javascript/i,
/data:application\/javascript/i,
// JSONP callback manipulation
/callback=.*script/i,
// Common CSP bypass techniques
/location\.href.*javascript/i,
/document\.write.*script/i,
/eval\(/i,
/\bnew\s+Function\s*\(/i,
/setTimeout\s*\(\s*['"`].*['"`]/i,
/setInterval\s*\(\s*['"`].*['"`]/i,
];
/**
* Test CSP implementation with common scenarios
*/
export function testCSPImplementation(csp: string): {
testResults: Array<{
name: string;
passed: boolean;
description: string;
recommendation?: string;
}>;
overallScore: number;
} {
const testResults = [];
// Test 1: Script injection protection
testResults.push({
name: "Script Injection Protection",
passed: !csp.includes("'unsafe-inline'") || csp.includes("'nonce-"),
description: "Checks if inline scripts are properly controlled",
recommendation:
csp.includes("'unsafe-inline'") && !csp.includes("'nonce-")
? "Use nonce-based CSP instead of 'unsafe-inline'"
: undefined,
});
// Test 2: Eval protection
testResults.push({
name: "Eval Protection",
passed: !csp.includes("'unsafe-eval'"),
description: "Ensures eval() and similar functions are blocked",
recommendation: csp.includes("'unsafe-eval'")
? "Remove 'unsafe-eval' to prevent code injection"
: undefined,
});
// Test 3: Object blocking
testResults.push({
name: "Object Blocking",
passed: csp.includes("object-src 'none'"),
description: "Blocks dangerous object, embed, and applet elements",
recommendation: !csp.includes("object-src 'none'")
? "Add 'object-src 'none'' to block plugins"
: undefined,
});
// Test 4: Frame protection
testResults.push({
name: "Frame Protection",
passed:
csp.includes("frame-ancestors 'none'") ||
csp.includes("frame-ancestors 'self'"),
description: "Prevents clickjacking attacks",
recommendation: !csp.includes("frame-ancestors")
? "Add 'frame-ancestors 'none'' for clickjacking protection"
: undefined,
});
// Test 5: HTTPS enforcement
testResults.push({
name: "HTTPS Enforcement",
passed: csp.includes("upgrade-insecure-requests"),
description: "Automatically upgrades HTTP requests to HTTPS",
recommendation: !csp.includes("upgrade-insecure-requests")
? "Add 'upgrade-insecure-requests' for automatic HTTPS"
: undefined,
});
// Test 6: Base URI restriction
testResults.push({
name: "Base URI Restriction",
passed: csp.includes("base-uri 'self'") || csp.includes("base-uri 'none'"),
description: "Prevents base tag injection attacks",
recommendation: !csp.includes("base-uri")
? "Add 'base-uri 'self'' to prevent base tag attacks"
: undefined,
});
// Test 7: Form action restriction
testResults.push({
name: "Form Action Restriction",
passed: csp.includes("form-action 'self'") || csp.includes("form-action"),
description: "Controls where forms can be submitted",
recommendation: !csp.includes("form-action")
? "Add 'form-action 'self'' to control form submissions"
: undefined,
});
// Test 8: Reporting configuration
testResults.push({
name: "Violation Reporting",
passed: csp.includes("report-uri") || csp.includes("report-to"),
description: "Enables CSP violation monitoring",
recommendation:
!csp.includes("report-uri") && !csp.includes("report-to")
? "Add 'report-uri' for violation monitoring"
: undefined,
});
const passedTests = testResults.filter((test) => test.passed).length;
const overallScore = Math.round((passedTests / testResults.length) * 100);
return {
testResults,
overallScore,
};
}
/**
* Detect potential CSP bypass attempts
*/
export function detectCSPBypass(content: string): {
isDetected: boolean;
patterns: string[];
riskLevel: "low" | "medium" | "high";
} {
const detectedPatterns: string[] = [];
for (const pattern of CSP_BYPASS_PATTERNS) {
if (pattern.test(content)) {
detectedPatterns.push(pattern.source);
}
}
// Determine risk level based on pattern types
const highRiskPatterns = [
/javascript:/i,
/eval\(/i,
/\bnew\s+Function\s*\(/i,
/data:text\/javascript/i,
/data:application\/javascript/i,
/data:.*base64.*script/i,
];
const hasHighRiskPattern = detectedPatterns.some((pattern) =>
highRiskPatterns.some((highRisk) => highRisk.source === pattern)
);
const riskLevel =
hasHighRiskPattern || detectedPatterns.length >= 3
? "high"
: detectedPatterns.length >= 1
? "medium"
: "low";
return {
isDetected: detectedPatterns.length > 0,
patterns: detectedPatterns,
riskLevel,
};
}
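
A short end-to-end sketch using the exports above: build a nonce-based policy, then score it. The report URI is an illustrative placeholder.

import { buildCSP, generateNonce, validateCSP } from "./csp";

const nonce = generateNonce();
const csp = buildCSP({
  nonce,
  isDevelopment: false,
  strictMode: true,
  reportUri: "/api/csp-report", // hypothetical reporting endpoint
});
// validateCSP flags unsafe directives, missing protections, and wildcards.
const { isValid, securityScore, recommendations } = validateCSP(csp, {
  strictMode: true,
});
console.log({ isValid, securityScore, recommendations });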

View File

@ -148,7 +148,7 @@ export class CSRFProtection {
}
// Get token from request
const requestToken = await this.getTokenFromRequest(request);
const requestToken = await CSRFProtection.getTokenFromRequest(request);
if (!requestToken) {
return {
valid: false,
@ -193,7 +193,9 @@ export class CSRFProtection {
/**
* Extract token from request (handles different content types)
*/
private static async getTokenFromRequest(request: NextRequest): Promise<string | null> {
private static async getTokenFromRequest(
request: NextRequest
): Promise<string | null> {
// Check header first
const headerToken = request.headers.get(CSRF_CONFIG.headerName);
if (headerToken) {
@ -207,7 +209,11 @@ export class CSRFProtection {
if (contentType?.includes("application/json")) {
const body = await request.clone().json();
return body.csrfToken || body.csrf_token || null;
} else if (contentType?.includes("multipart/form-data") || contentType?.includes("application/x-www-form-urlencoded")) {
}
if (
contentType?.includes("multipart/form-data") ||
contentType?.includes("application/x-www-form-urlencoded")
) {
const formData = await request.clone().formData();
return formData.get("csrf_token") as string | null;
}
@ -270,11 +276,13 @@ export const CSRFClient = {
/**
* Add CSRF token to object (for JSON requests)
*/
addTokenToObject<T extends Record<string, unknown>>(obj: T): T & { csrfToken: string } {
addTokenToObject<T extends Record<string, unknown>>(
obj: T
): T & { csrfToken: string } {
const token = this.getToken();
return {
...obj,
csrfToken: token || "",
};
},
};
};
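
A client-side sketch of the JSON flow the diff above touches; the import path and endpoint are assumptions. The `csrfToken` key injected by `addTokenToObject` is exactly what `CSRFProtection.getTokenFromRequest` looks for in JSON bodies.

// Sketch only: assumes CSRFClient is exported from lib/csrf.
import { CSRFClient } from "./csrf";

async function saveSettings(settings: Record<string, unknown>): Promise<void> {
  const body = CSRFClient.addTokenToObject(settings);
  await fetch("/api/settings", {
    // hypothetical endpoint
    method: "POST",
    headers: { "Content-Type": "application/json" },
    body: JSON.stringify(body),
  });
}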

View File

@ -80,7 +80,10 @@ export const env = {
NODE_ENV: parseEnvValue(process.env.NODE_ENV) || "development",
// CSRF Protection
CSRF_SECRET: parseEnvValue(process.env.CSRF_SECRET) || parseEnvValue(process.env.NEXTAUTH_SECRET) || "fallback-csrf-secret",
CSRF_SECRET:
parseEnvValue(process.env.CSRF_SECRET) ||
parseEnvValue(process.env.NEXTAUTH_SECRET) ||
"fallback-csrf-secret",
// OpenAI
OPENAI_API_KEY: parseEnvValue(process.env.OPENAI_API_KEY) || "",

View File

@ -42,7 +42,8 @@ export function useCSRF() {
throw new Error("Invalid response from CSRF endpoint");
}
} catch (err) {
const errorMessage = err instanceof Error ? err.message : "Failed to fetch CSRF token";
const errorMessage =
err instanceof Error ? err.message : "Failed to fetch CSRF token";
setError(errorMessage);
console.error("CSRF token fetch error:", errorMessage);
} finally {
@ -188,4 +189,4 @@ export function useCSRFForm() {
addTokenToFormData: CSRFClient.addTokenToFormData,
addTokenToObject: CSRFClient.addTokenToObject,
};
}
}

View File

@ -150,7 +150,7 @@ class OpenAIMockServer {
} else {
// Use simple response generators for other types
const detectedType = this.extractProcessingType(
systemMessage + " " + userMessage
`${systemMessage} ${userMessage}`
);
response = MOCK_RESPONSE_GENERATORS[detectedType](userMessage);
processingType = detectedType;

View File

@ -204,7 +204,7 @@ export function generateSessionAnalysisResponse(
const sentences = text.split(/[.!?]+/).filter((s) => s.trim().length > 0);
let summary = sentences[0]?.trim() || text.substring(0, 100);
if (summary.length > 150) {
summary = summary.substring(0, 147) + "...";
summary = `${summary.substring(0, 147)}...`;
}
if (summary.length < 10) {
summary = "User inquiry regarding company policies";
@ -360,7 +360,7 @@ export function generateSummaryResponse(text: string): MockChatCompletion {
let summary = sentences[0]?.trim() || text.substring(0, 100);
if (summary.length > 150) {
summary = summary.substring(0, 147) + "...";
summary = `${summary.substring(0, 147)}...`;
}
const promptTokens = Math.ceil(text.length / 4);

30
lib/nonce-context.tsx Normal file
View File

@ -0,0 +1,30 @@
"use client";
import { createContext, type ReactNode, useContext } from "react";
interface NonceContextType {
nonce?: string;
}
const NonceContext = createContext<NonceContextType>({});
export function NonceProvider({
children,
nonce,
}: {
children: ReactNode;
nonce?: string;
}) {
return (
<NonceContext.Provider value={{ nonce }}>{children}</NonceContext.Provider>
);
}
export function useNonce() {
const context = useContext(NonceContext);
return context.nonce;
}
export function useCSPNonce() {
return useNonce();
}

28
lib/nonce-utils.ts Normal file
View File

@ -0,0 +1,28 @@
import { headers } from "next/headers";
/**
* Get the CSP nonce from request headers (server-side only)
*/
export async function getNonce(): Promise<string | undefined> {
try {
const headersList = await headers();
return headersList.get("X-Nonce") || undefined;
} catch {
// Headers not available (e.g., in client-side code)
return undefined;
}
}
/**
* Create script props with nonce for CSP compliance
*/
export function createScriptProps(nonce?: string) {
return nonce ? { nonce } : {};
}
/**
* Create style props with nonce for CSP compliance
*/
export function createStyleProps(nonce?: string) {
return nonce ? { nonce } : {};
}
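
A sketch of threading the nonce from the middleware header into client components via the provider above; the root layout shown is hypothetical and simplified (no html/body wrapper), and the import paths are assumptions.

import type { ReactNode } from "react";
import { NonceProvider } from "./nonce-context";
import { getNonce } from "./nonce-utils";

export default async function RootLayout({ children }: { children: ReactNode }) {
  const nonce = await getNonce(); // reads the X-Nonce header set by the CSP middleware
  return <NonceProvider nonce={nonce}>{children}</NonceProvider>;
}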

View File

@ -2,6 +2,11 @@ import bcrypt from "bcryptjs";
import type { NextAuthOptions } from "next-auth";
import CredentialsProvider from "next-auth/providers/credentials";
import { prisma } from "./prisma";
import {
AuditOutcome,
createAuditMetadata,
securityAuditLogger,
} from "./securityAuditLogger";
// Define the shape of the JWT token for platform users
declare module "next-auth/jwt" {
@ -47,6 +52,17 @@ export const platformAuthOptions: NextAuthOptions = {
},
async authorize(credentials) {
if (!credentials?.email || !credentials?.password) {
await securityAuditLogger.logPlatformAdmin(
"platform_login_attempt",
AuditOutcome.FAILURE,
{
metadata: createAuditMetadata({
error: "missing_credentials",
email: credentials?.email ? "[REDACTED]" : "missing",
}),
},
"Missing email or password for platform login"
);
return null;
}
@ -54,13 +70,55 @@ export const platformAuthOptions: NextAuthOptions = {
where: { email: credentials.email },
});
if (!platformUser) return null;
if (!platformUser) {
await securityAuditLogger.logPlatformAdmin(
"platform_login_attempt",
AuditOutcome.FAILURE,
{
metadata: createAuditMetadata({
error: "user_not_found",
email: "[REDACTED]",
}),
},
"Platform user not found"
);
return null;
}
const valid = await bcrypt.compare(
credentials.password,
platformUser.password
);
if (!valid) return null;
if (!valid) {
await securityAuditLogger.logPlatformAdmin(
"platform_login_attempt",
AuditOutcome.FAILURE,
{
platformUserId: platformUser.id,
metadata: createAuditMetadata({
error: "invalid_password",
email: "[REDACTED]",
role: platformUser.role,
}),
},
"Invalid password for platform login"
);
return null;
}
// Log successful platform authentication
await securityAuditLogger.logPlatformAdmin(
"platform_login_success",
AuditOutcome.SUCCESS,
{
platformUserId: platformUser.id,
metadata: createAuditMetadata({
role: platformUser.role,
name: platformUser.name,
}),
}
);
return {
id: platformUser.id,

View File

@ -77,6 +77,49 @@ export class InMemoryRateLimiter {
}
}
/**
* Check rate limit with custom parameters
*/
async check(
key: string,
maxAttempts: number,
windowMs: number
): Promise<{
success: boolean;
remaining: number;
}> {
const now = Date.now();
let attempt = this.attempts.get(key);
if (!attempt || now > attempt.resetTime) {
// Initialize or reset the attempt
attempt = {
count: 1,
resetTime: now + windowMs,
};
this.attempts.set(key, attempt);
return {
success: true,
remaining: maxAttempts - 1,
};
}
if (attempt.count >= maxAttempts) {
return {
success: false,
remaining: 0,
};
}
attempt.count++;
this.attempts.set(key, attempt);
return {
success: true,
remaining: maxAttempts - attempt.count,
};
}
/**
* Clean up resources
*/
@ -87,6 +130,16 @@ export class InMemoryRateLimiter {
}
}
/**
* Default rate limiter instance for general use
*/
export const rateLimiter = new InMemoryRateLimiter({
maxAttempts: 100,
windowMs: 15 * 60 * 1000, // 15 minutes
maxEntries: 10000,
cleanupIntervalMs: 5 * 60 * 1000, // 5 minutes
});
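
A sketch of guarding a route with the shared limiter; the key scheme, limits, and endpoint are illustrative, while `check` and `extractClientIP` match the module above.

import { type NextRequest, NextResponse } from "next/server";
import { extractClientIP, rateLimiter } from "./rateLimiter";

export async function POST(request: NextRequest): Promise<NextResponse> {
  const ip = extractClientIP(request);
  const { success, remaining } = await rateLimiter.check(
    `login:${ip}`, // hypothetical key scheme
    5, // max attempts
    60 * 1000 // per minute
  );
  if (!success) {
    return NextResponse.json({ error: "Too many requests" }, { status: 429 });
  }
  // ... handle the login attempt; `remaining` can go in a response header.
  return NextResponse.json({ ok: true, remaining });
}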
/**
* Extract client IP address from request headers
*/

View File

@ -1,5 +1,6 @@
// Combined scheduler initialization with graceful shutdown
import { auditLogScheduler } from "./auditLogScheduler";
import { prisma } from "./prisma";
import { startProcessingScheduler } from "./processingScheduler";
import { startCsvImportScheduler } from "./scheduler";
@ -8,6 +9,7 @@ import { startCsvImportScheduler } from "./scheduler";
* Initialize all schedulers
* - CSV import scheduler (runs every 15 minutes)
* - Session processing scheduler (runs every hour)
* - Audit log retention scheduler (runs weekly by default)
*/
export function initializeSchedulers() {
// Start the CSV import scheduler
@ -16,6 +18,14 @@ export function initializeSchedulers() {
// Start the session processing scheduler
startProcessingScheduler();
// Start the audit log retention scheduler
if (process.env.AUDIT_LOG_RETENTION_ENABLED !== "false") {
auditLogScheduler.start();
console.log("Audit log retention scheduler started");
} else {
console.log("Audit log retention scheduler disabled");
}
console.log("All schedulers initialized successfully");
// Set up graceful shutdown for schedulers
@ -30,6 +40,10 @@ function setupGracefulShutdown() {
console.log(`\nReceived ${signal}. Starting graceful shutdown...`);
try {
// Stop the audit log scheduler
auditLogScheduler.stop();
console.log("Audit log scheduler stopped.");
// Disconnect from database
await prisma.$disconnect();
console.log("Database connections closed.");

443
lib/securityAuditLogger.ts Normal file
View File

@ -0,0 +1,443 @@
import type { NextRequest } from "next/server";
import { prisma } from "./prisma";
import { extractClientIP } from "./rateLimiter";
export interface AuditLogContext {
userId?: string;
companyId?: string;
platformUserId?: string;
sessionId?: string;
requestId?: string;
userAgent?: string;
ipAddress?: string;
country?: string;
metadata?: Record<string, any>;
}
export interface AuditLogEntry {
eventType: SecurityEventType;
action: string;
outcome: AuditOutcome;
severity?: AuditSeverity;
errorMessage?: string;
context?: AuditLogContext;
}
export enum SecurityEventType {
AUTHENTICATION = "AUTHENTICATION",
AUTHORIZATION = "AUTHORIZATION",
USER_MANAGEMENT = "USER_MANAGEMENT",
COMPANY_MANAGEMENT = "COMPANY_MANAGEMENT",
RATE_LIMITING = "RATE_LIMITING",
CSRF_PROTECTION = "CSRF_PROTECTION",
SECURITY_HEADERS = "SECURITY_HEADERS",
PASSWORD_RESET = "PASSWORD_RESET",
PLATFORM_ADMIN = "PLATFORM_ADMIN",
DATA_PRIVACY = "DATA_PRIVACY",
SYSTEM_CONFIG = "SYSTEM_CONFIG",
API_SECURITY = "API_SECURITY",
}
export enum AuditOutcome {
SUCCESS = "SUCCESS",
FAILURE = "FAILURE",
BLOCKED = "BLOCKED",
RATE_LIMITED = "RATE_LIMITED",
SUSPICIOUS = "SUSPICIOUS",
}
export enum AuditSeverity {
INFO = "INFO",
LOW = "LOW",
MEDIUM = "MEDIUM",
HIGH = "HIGH",
CRITICAL = "CRITICAL",
}
class SecurityAuditLogger {
private isEnabled: boolean;
constructor() {
this.isEnabled = process.env.AUDIT_LOGGING_ENABLED !== "false";
}
async log(entry: AuditLogEntry): Promise<void> {
if (!this.isEnabled) {
return;
}
try {
await prisma.securityAuditLog.create({
data: {
eventType: entry.eventType,
action: entry.action,
outcome: entry.outcome,
severity: entry.severity || AuditSeverity.INFO,
userId: entry.context?.userId || null,
companyId: entry.context?.companyId || null,
platformUserId: entry.context?.platformUserId || null,
ipAddress: entry.context?.ipAddress || null,
userAgent: entry.context?.userAgent || null,
country: entry.context?.country || null,
sessionId: entry.context?.sessionId || null,
requestId: entry.context?.requestId || null,
metadata: entry.context?.metadata || null,
errorMessage: entry.errorMessage || null,
},
});
} catch (error) {
console.error("Failed to write audit log:", error);
}
}
async logAuthentication(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = this.getAuthenticationSeverity(outcome);
await this.log({
eventType: SecurityEventType.AUTHENTICATION,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logAuthorization(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity =
outcome === AuditOutcome.BLOCKED
? AuditSeverity.MEDIUM
: AuditSeverity.INFO;
await this.log({
eventType: SecurityEventType.AUTHORIZATION,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logUserManagement(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = this.getUserManagementSeverity(action, outcome);
await this.log({
eventType: SecurityEventType.USER_MANAGEMENT,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logCompanyManagement(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = this.getCompanyManagementSeverity(action, outcome);
await this.log({
eventType: SecurityEventType.COMPANY_MANAGEMENT,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logRateLimiting(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity =
outcome === AuditOutcome.RATE_LIMITED
? AuditSeverity.MEDIUM
: AuditSeverity.LOW;
await this.log({
eventType: SecurityEventType.RATE_LIMITING,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logCSRFProtection(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity =
outcome === AuditOutcome.BLOCKED
? AuditSeverity.HIGH
: AuditSeverity.MEDIUM;
await this.log({
eventType: SecurityEventType.CSRF_PROTECTION,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logSecurityHeaders(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity =
outcome === AuditOutcome.BLOCKED
? AuditSeverity.MEDIUM
: AuditSeverity.LOW;
await this.log({
eventType: SecurityEventType.SECURITY_HEADERS,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logPasswordReset(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = this.getPasswordResetSeverity(action, outcome);
await this.log({
eventType: SecurityEventType.PASSWORD_RESET,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logPlatformAdmin(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = AuditSeverity.HIGH; // All platform admin actions are high severity
await this.log({
eventType: SecurityEventType.PLATFORM_ADMIN,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logDataPrivacy(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = AuditSeverity.HIGH; // Data privacy events are always high severity
await this.log({
eventType: SecurityEventType.DATA_PRIVACY,
action,
outcome,
severity,
errorMessage,
context,
});
}
async logAPIStatus(
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
errorMessage?: string
): Promise<void> {
const severity = this.getAPISecuritySeverity(outcome);
await this.log({
eventType: SecurityEventType.API_SECURITY,
action,
outcome,
severity,
errorMessage,
context,
});
}
private getAuthenticationSeverity(outcome: AuditOutcome): AuditSeverity {
switch (outcome) {
case AuditOutcome.SUCCESS:
return AuditSeverity.INFO;
case AuditOutcome.FAILURE:
return AuditSeverity.MEDIUM;
case AuditOutcome.BLOCKED:
case AuditOutcome.RATE_LIMITED:
return AuditSeverity.HIGH;
case AuditOutcome.SUSPICIOUS:
return AuditSeverity.MEDIUM;
default:
return AuditSeverity.INFO;
}
}
private getUserManagementSeverity(
action: string,
outcome: AuditOutcome
): AuditSeverity {
const privilegedActions = ["delete", "suspend", "elevate", "grant"];
const isPrivilegedAction = privilegedActions.some((pa) =>
action.toLowerCase().includes(pa)
);
if (isPrivilegedAction) {
return outcome === AuditOutcome.SUCCESS
? AuditSeverity.HIGH
: AuditSeverity.MEDIUM;
}
return outcome === AuditOutcome.SUCCESS
? AuditSeverity.MEDIUM
: AuditSeverity.LOW;
}
private getCompanyManagementSeverity(
action: string,
outcome: AuditOutcome
): AuditSeverity {
const criticalActions = ["suspend", "delete", "archive"];
const isCriticalAction = criticalActions.some((ca) =>
action.toLowerCase().includes(ca)
);
if (isCriticalAction) {
return outcome === AuditOutcome.SUCCESS
? AuditSeverity.CRITICAL
: AuditSeverity.HIGH;
}
return outcome === AuditOutcome.SUCCESS
? AuditSeverity.HIGH
: AuditSeverity.MEDIUM;
}
private getPasswordResetSeverity(
action: string,
outcome: AuditOutcome
): AuditSeverity {
if (action.toLowerCase().includes("complete")) {
return outcome === AuditOutcome.SUCCESS
? AuditSeverity.MEDIUM
: AuditSeverity.LOW;
}
return AuditSeverity.INFO;
}
private getAPISecuritySeverity(outcome: AuditOutcome): AuditSeverity {
switch (outcome) {
case AuditOutcome.BLOCKED:
return AuditSeverity.HIGH;
case AuditOutcome.SUSPICIOUS:
return AuditSeverity.MEDIUM;
case AuditOutcome.RATE_LIMITED:
return AuditSeverity.MEDIUM;
default:
return AuditSeverity.LOW;
}
}
static extractContextFromRequest(
request: NextRequest
): Partial<AuditLogContext> {
return {
ipAddress: extractClientIP(request),
userAgent: request.headers.get("user-agent") || undefined,
requestId: request.headers.get("x-request-id") || crypto.randomUUID(),
};
}
static createSessionContext(sessionId?: string): Partial<AuditLogContext> {
return {
sessionId,
requestId: crypto.randomUUID(),
};
}
}
export const securityAuditLogger = new SecurityAuditLogger();
export async function createAuditContext(
request?: NextRequest,
session?: any,
additionalContext?: Partial<AuditLogContext>
): Promise<AuditLogContext> {
const context: AuditLogContext = {
requestId: crypto.randomUUID(),
...additionalContext,
};
if (request) {
const requestContext =
SecurityAuditLogger.extractContextFromRequest(request);
Object.assign(context, requestContext);
}
if (session?.user) {
context.userId = session.user.id;
context.companyId = session.user.companyId;
if (session.user.isPlatformUser) {
context.platformUserId = session.user.id;
}
}
return context;
}
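// Illustrative usage (a sketch — assumes a Next.js route handler with
// `request` and the NextAuth `session` in scope):
//
//   const ctx = await createAuditContext(request, session, { companyId });
//   await securityAuditLogger.logPlatformAdmin(
//     "company_suspended",
//     AuditOutcome.SUCCESS,
//     ctx
//   );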
export function createAuditMetadata(
data: Record<string, any>
): Record<string, any> {
const sanitized: Record<string, any> = {};
for (const [key, value] of Object.entries(data)) {
if (
typeof value === "string" ||
typeof value === "number" ||
typeof value === "boolean"
) {
sanitized[key] = value;
} else if (Array.isArray(value)) {
sanitized[key] = value.map((item) =>
typeof item === "object" ? "[Object]" : item
);
} else if (typeof value === "object" && value !== null) {
sanitized[key] = "[Object]";
}
}
return sanitized;
}
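// Behavior sketch: primitive values pass through unchanged, while nested
// objects are collapsed to "[Object]" so structured payloads are never stored
// verbatim. For example:
//
//   createAuditMetadata({ email: "a@example.com", attempts: 3, raw: { token: "x" } })
//   // => { email: "a@example.com", attempts: 3, raw: "[Object]" }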

960
lib/securityMonitoring.ts Normal file
View File

@ -0,0 +1,960 @@
import { prisma } from "./prisma";
import {
type AuditLogContext,
AuditOutcome,
AuditSeverity,
SecurityEventType,
securityAuditLogger,
} from "./securityAuditLogger";
export interface SecurityAlert {
id: string;
timestamp: Date;
severity: AlertSeverity;
type: AlertType;
title: string;
description: string;
eventType: SecurityEventType;
context: AuditLogContext;
metadata: Record<string, any>;
acknowledged: boolean;
acknowledgedBy?: string;
acknowledgedAt?: Date;
}
export enum AlertSeverity {
LOW = "LOW",
MEDIUM = "MEDIUM",
HIGH = "HIGH",
CRITICAL = "CRITICAL",
}
export enum AlertType {
AUTHENTICATION_ANOMALY = "AUTHENTICATION_ANOMALY",
RATE_LIMIT_BREACH = "RATE_LIMIT_BREACH",
MULTIPLE_FAILED_LOGINS = "MULTIPLE_FAILED_LOGINS",
SUSPICIOUS_IP_ACTIVITY = "SUSPICIOUS_IP_ACTIVITY",
PRIVILEGE_ESCALATION = "PRIVILEGE_ESCALATION",
DATA_BREACH_ATTEMPT = "DATA_BREACH_ATTEMPT",
CSRF_ATTACK = "CSRF_ATTACK",
CSP_VIOLATION_SPIKE = "CSP_VIOLATION_SPIKE",
ACCOUNT_ENUMERATION = "ACCOUNT_ENUMERATION",
BRUTE_FORCE_ATTACK = "BRUTE_FORCE_ATTACK",
UNUSUAL_ADMIN_ACTIVITY = "UNUSUAL_ADMIN_ACTIVITY",
GEOLOCATION_ANOMALY = "GEOLOCATION_ANOMALY",
MASS_DATA_ACCESS = "MASS_DATA_ACCESS",
SUSPICIOUS_USER_AGENT = "SUSPICIOUS_USER_AGENT",
SESSION_HIJACKING = "SESSION_HIJACKING",
}
export interface SecurityMetrics {
totalEvents: number;
criticalEvents: number;
activeAlerts: number;
resolvedAlerts: number;
securityScore: number;
threatLevel: ThreatLevel;
eventsByType: Record<SecurityEventType, number>;
alertsByType: Record<AlertType, number>;
topThreats: Array<{ type: AlertType; count: number }>;
geoDistribution: Record<string, number>;
timeDistribution: Array<{ hour: number; count: number }>;
userRiskScores: Array<{ userId: string; email: string; riskScore: number }>;
}
export enum ThreatLevel {
LOW = "LOW",
MODERATE = "MODERATE",
HIGH = "HIGH",
CRITICAL = "CRITICAL",
}
export interface MonitoringConfig {
thresholds: {
failedLoginsPerMinute: number;
failedLoginsPerHour: number;
rateLimitViolationsPerMinute: number;
cspViolationsPerMinute: number;
adminActionsPerHour: number;
massDataAccessThreshold: number;
suspiciousIPThreshold: number;
};
alerting: {
enabled: boolean;
channels: AlertChannel[];
suppressDuplicateMinutes: number;
escalationTimeoutMinutes: number;
};
retention: {
alertRetentionDays: number;
metricsRetentionDays: number;
};
}
export enum AlertChannel {
EMAIL = "EMAIL",
WEBHOOK = "WEBHOOK",
SLACK = "SLACK",
DISCORD = "DISCORD",
PAGERDUTY = "PAGERDUTY",
}
export interface AnomalyDetectionResult {
isAnomaly: boolean;
confidence: number;
type: string;
description: string;
recommendedActions: string[];
}
class SecurityMonitoringService {
private alerts: SecurityAlert[] = [];
private config: MonitoringConfig;
private eventBuffer: Array<{
timestamp: Date;
eventType: SecurityEventType;
context: AuditLogContext;
outcome: AuditOutcome;
severity: AuditSeverity;
}> = [];
constructor() {
this.config = this.getDefaultConfig();
this.startBackgroundProcessing();
}
/**
* Process security event and check for threats
*/
async processSecurityEvent(
eventType: SecurityEventType,
outcome: AuditOutcome,
context: AuditLogContext,
severity: AuditSeverity = AuditSeverity.INFO,
metadata?: Record<string, any>
): Promise<void> {
// Add event to buffer for analysis
this.eventBuffer.push({
timestamp: new Date(),
eventType,
context,
outcome,
severity,
});
// Immediate threat detection
const threats = await this.detectImmediateThreats(
eventType,
outcome,
context,
metadata
);
for (const threat of threats) {
await this.createAlert(threat);
}
// Anomaly detection
const anomaly = await this.detectAnomalies(eventType, context);
if (anomaly.isAnomaly && anomaly.confidence > 0.7) {
await this.createAlert({
severity: this.mapConfidenceToSeverity(anomaly.confidence),
type:
anomaly.type === "geographical_anomaly"
? AlertType.GEOLOCATION_ANOMALY
: AlertType.AUTHENTICATION_ANOMALY,
title: `Anomaly Detected: ${anomaly.type}`,
description: anomaly.description,
eventType,
context,
metadata: { anomaly, confidence: anomaly.confidence },
});
}
// Clean old events to prevent memory issues
this.cleanupEventBuffer();
}
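// Illustrative direct call (a sketch — in practice events usually arrive via
// the enhancedSecurityLog helper exported at the bottom of this file):
//
//   await securityMonitoring.processSecurityEvent(
//     SecurityEventType.AUTHENTICATION,
//     AuditOutcome.FAILURE,
//     { ipAddress: "203.0.113.7", requestId: crypto.randomUUID() },
//     AuditSeverity.MEDIUM
//   );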
/**
* Get comprehensive security metrics
*/
async getSecurityMetrics(
timeRange: { start: Date; end: Date },
companyId?: string
): Promise<SecurityMetrics> {
const whereClause = {
timestamp: {
gte: timeRange.start,
lte: timeRange.end,
},
...(companyId && { companyId }),
};
// Get audit log data
const events = await prisma.securityAuditLog.findMany({
where: whereClause,
include: {
user: { select: { email: true } },
company: { select: { name: true } },
},
});
// Calculate metrics
const totalEvents = events.length;
const criticalEvents = events.filter(
(e) => e.severity === AuditSeverity.CRITICAL
).length;
const activeAlerts = this.alerts.filter((a) => !a.acknowledged).length;
const resolvedAlerts = this.alerts.filter((a) => a.acknowledged).length;
// Event distribution by type
const eventsByType = events.reduce(
(acc, event) => {
acc[event.eventType] = (acc[event.eventType] || 0) + 1;
return acc;
},
{} as Record<SecurityEventType, number>
);
// Alert distribution by type
const alertsByType = this.alerts.reduce(
(acc, alert) => {
acc[alert.type] = (acc[alert.type] || 0) + 1;
return acc;
},
{} as Record<AlertType, number>
);
// Top threats
const topThreats = Object.entries(alertsByType)
.map(([type, count]) => ({ type: type as AlertType, count }))
.sort((a, b) => b.count - a.count)
.slice(0, 5);
// Geographic distribution
const geoDistribution = events.reduce(
(acc, event) => {
if (event.country) {
acc[event.country] = (acc[event.country] || 0) + 1;
}
return acc;
},
{} as Record<string, number>
);
// Time distribution (by hour)
const timeDistribution = Array.from({ length: 24 }, (_, hour) => ({
hour,
count: events.filter((e) => e.timestamp.getHours() === hour).length,
}));
// User risk scores
const userRiskScores = await this.calculateUserRiskScores(events);
// Calculate overall security score
const securityScore = this.calculateSecurityScore({
totalEvents,
criticalEvents,
activeAlerts,
topThreats,
});
// Determine threat level
const threatLevel = this.determineThreatLevel(
securityScore,
activeAlerts,
criticalEvents
);
return {
totalEvents,
criticalEvents,
activeAlerts,
resolvedAlerts,
securityScore,
threatLevel,
eventsByType,
alertsByType,
topThreats,
geoDistribution,
timeDistribution,
userRiskScores,
};
}
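// Illustrative dashboard query (a sketch): metrics for the trailing 24 hours,
// optionally scoped to a single tenant by passing companyId:
//
//   const metrics = await securityMonitoring.getSecurityMetrics({
//     start: new Date(Date.now() - 24 * 60 * 60 * 1000),
//     end: new Date(),
//   });
//   // metrics.threatLevel, metrics.securityScore, metrics.topThreats, ...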
/**
* Get active security alerts
*/
getActiveAlerts(severity?: AlertSeverity): SecurityAlert[] {
return this.alerts.filter(
(alert) =>
!alert.acknowledged && (!severity || alert.severity === severity)
);
}
/**
* Acknowledge an alert
*/
async acknowledgeAlert(
alertId: string,
acknowledgedBy: string
): Promise<boolean> {
const alert = this.alerts.find((a) => a.id === alertId);
if (!alert) return false;
alert.acknowledged = true;
alert.acknowledgedBy = acknowledgedBy;
alert.acknowledgedAt = new Date();
// Log the acknowledgment
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "alert_acknowledged",
outcome: AuditOutcome.SUCCESS,
severity: AuditSeverity.INFO,
context: {
userId: acknowledgedBy,
metadata: { alertId, alertType: alert.type },
},
});
return true;
}
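// Illustrative triage loop (a sketch — adminUserId is assumed to come from the
// caller's authenticated session):
//
//   for (const alert of securityMonitoring.getActiveAlerts(AlertSeverity.CRITICAL)) {
//     await securityMonitoring.acknowledgeAlert(alert.id, adminUserId);
//   }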
/**
* Export security data for analysis
*/
exportSecurityData(
format: "json" | "csv",
timeRange: { start: Date; end: Date }
): string {
const filteredAlerts = this.alerts.filter(
(a) => a.timestamp >= timeRange.start && a.timestamp <= timeRange.end
);
if (format === "csv") {
const headers = [
"timestamp",
"severity",
"type",
"title",
"description",
"eventType",
"userId",
"companyId",
"ipAddress",
"userAgent",
"acknowledged",
].join(",");
const rows = filteredAlerts.map((alert) =>
[
alert.timestamp.toISOString(),
alert.severity,
alert.type,
`"${alert.title}"`,
`"${alert.description}"`,
alert.eventType,
alert.context.userId || "",
alert.context.companyId || "",
alert.context.ipAddress || "",
alert.context.userAgent || "",
alert.acknowledged.toString(),
].join(",")
);
return [headers, ...rows].join("\n");
}
return JSON.stringify(filteredAlerts, null, 2);
}
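// Illustrative export (a sketch): CSV covering the trailing 7 days, e.g. for a
// download endpoint or an offline analysis job:
//
//   const csv = securityMonitoring.exportSecurityData("csv", {
//     start: new Date(Date.now() - 7 * 24 * 60 * 60 * 1000),
//     end: new Date(),
//   });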
/**
* Configure monitoring thresholds
*/
updateConfig(config: Partial<MonitoringConfig>): void {
this.config = this.deepMerge(this.config, config);
}
/**
* Deep merge helper function for config updates
*/
private deepMerge(target: any, source: any): any {
const result = { ...target };
for (const key in source) {
if (
source[key] !== null &&
typeof source[key] === "object" &&
!Array.isArray(source[key])
) {
result[key] = this.deepMerge(target[key] || {}, source[key]);
} else {
result[key] = source[key];
}
}
return result;
}
/**
* Get current monitoring configuration
*/
getConfig(): MonitoringConfig {
return { ...this.config };
}
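// Illustrative override (a sketch): Partial<MonitoringConfig> is shallow, so
// spread the current thresholds to satisfy the type; deepMerge preserves
// unspecified nested keys at runtime either way:
//
//   securityMonitoring.updateConfig({
//     thresholds: {
//       ...securityMonitoring.getConfig().thresholds,
//       failedLoginsPerMinute: 3,
//     },
//   });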
/**
* Calculate threat level for a specific IP
*/
async calculateIPThreatLevel(ipAddress: string): Promise<{
threatLevel: ThreatLevel;
riskFactors: string[];
recommendations: string[];
}> {
const oneDayAgo = new Date(Date.now() - 24 * 60 * 60 * 1000);
const events = await prisma.securityAuditLog.findMany({
where: {
ipAddress,
timestamp: { gte: oneDayAgo },
},
});
const riskFactors: string[] = [];
const recommendations: string[] = [];
// Failed login attempts
const failedLogins = events.filter(
(e) =>
e.eventType === SecurityEventType.AUTHENTICATION &&
e.outcome === AuditOutcome.FAILURE
).length;
if (failedLogins > 10) {
riskFactors.push(`${failedLogins} failed login attempts in 24h`);
recommendations.push("Consider temporary IP blocking");
}
// Rate limit violations
const rateLimitViolations = events.filter(
(e) => e.outcome === AuditOutcome.RATE_LIMITED
).length;
if (rateLimitViolations > 5) {
riskFactors.push(`${rateLimitViolations} rate limit violations`);
recommendations.push("Implement stricter rate limiting");
}
// Multiple user attempts
const uniqueUsers = new Set(events.map((e) => e.userId).filter(Boolean))
.size;
if (uniqueUsers > 5) {
riskFactors.push(`Access attempts to ${uniqueUsers} different accounts`);
recommendations.push("Investigate for account enumeration");
}
// Determine threat level
let threatLevel = ThreatLevel.LOW;
if (riskFactors.length >= 3) threatLevel = ThreatLevel.CRITICAL;
else if (riskFactors.length >= 2) threatLevel = ThreatLevel.HIGH;
else if (riskFactors.length >= 1) threatLevel = ThreatLevel.MODERATE;
// Ensure we always provide at least basic analysis
if (riskFactors.length === 0) {
riskFactors.push(`${events.length} security events in 24h`);
}
if (recommendations.length === 0) {
recommendations.push("Continue monitoring for suspicious activity");
}
return { threatLevel, riskFactors, recommendations };
}
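// Illustrative triage (a sketch — the blocking policy is assumed, not
// implemented here):
//
//   const { threatLevel, riskFactors } =
//     await securityMonitoring.calculateIPThreatLevel("203.0.113.7");
//   if (threatLevel === ThreatLevel.CRITICAL) {
//     // e.g. block the IP at the edge and page on-call with riskFactors
//   }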
private async detectImmediateThreats(
eventType: SecurityEventType,
outcome: AuditOutcome,
context: AuditLogContext,
metadata?: Record<string, any>
): Promise<Array<Omit<SecurityAlert, "id" | "timestamp" | "acknowledged">>> {
const threats: Array<
Omit<SecurityAlert, "id" | "timestamp" | "acknowledged">
> = [];
const now = new Date();
// Multiple failed logins detection
if (
eventType === SecurityEventType.AUTHENTICATION &&
outcome === AuditOutcome.FAILURE &&
context.ipAddress
) {
const fiveMinutesAgo = new Date(now.getTime() - 5 * 60 * 1000);
const recentFailures = await prisma.securityAuditLog.count({
where: {
eventType: SecurityEventType.AUTHENTICATION,
outcome: AuditOutcome.FAILURE,
ipAddress: context.ipAddress,
timestamp: { gte: fiveMinutesAgo },
},
});
if (recentFailures >= this.config.thresholds.failedLoginsPerMinute) {
threats.push({
severity: AlertSeverity.HIGH,
type: AlertType.BRUTE_FORCE_ATTACK,
title: "Brute Force Attack Detected",
description: `${recentFailures} failed login attempts from IP ${context.ipAddress} in 5 minutes`,
eventType,
context,
metadata: { failedAttempts: recentFailures, ...metadata },
});
}
}
// Suspicious admin activity
if (
eventType === SecurityEventType.PLATFORM_ADMIN ||
(eventType === SecurityEventType.USER_MANAGEMENT && context.userId)
) {
const oneHourAgo = new Date(now.getTime() - 60 * 60 * 1000);
const adminActions = await prisma.securityAuditLog.count({
where: {
userId: context.userId,
eventType: {
in: [
SecurityEventType.PLATFORM_ADMIN,
SecurityEventType.USER_MANAGEMENT,
],
},
timestamp: { gte: oneHourAgo },
},
});
if (adminActions >= this.config.thresholds.adminActionsPerHour) {
threats.push({
severity: AlertSeverity.MEDIUM,
type: AlertType.UNUSUAL_ADMIN_ACTIVITY,
title: "Unusual Admin Activity",
description: `User ${context.userId} performed ${adminActions} admin actions in 1 hour`,
eventType,
context,
metadata: { adminActions, ...metadata },
});
}
}
// Rate limiting violations
if (outcome === AuditOutcome.RATE_LIMITED && context.ipAddress) {
const oneMinuteAgo = new Date(now.getTime() - 60 * 1000);
const rateLimitViolations = await prisma.securityAuditLog.count({
where: {
outcome: AuditOutcome.RATE_LIMITED,
ipAddress: context.ipAddress,
timestamp: { gte: oneMinuteAgo },
},
});
if (
rateLimitViolations >=
this.config.thresholds.rateLimitViolationsPerMinute
) {
threats.push({
severity: AlertSeverity.MEDIUM,
type: AlertType.RATE_LIMIT_BREACH,
title: "Rate Limit Breach",
description: `IP ${context.ipAddress} exceeded rate limits ${rateLimitViolations} times in 1 minute`,
eventType,
context,
metadata: { violations: rateLimitViolations, ...metadata },
});
}
}
return threats;
}
private async detectAnomalies(
eventType: SecurityEventType,
context: AuditLogContext
): Promise<AnomalyDetectionResult> {
// Simple anomaly detection based on historical patterns
const now = new Date();
const sevenDaysAgo = new Date(now.getTime() - 7 * 24 * 60 * 60 * 1000);
// Get historical data for baseline
const historicalEvents = await prisma.securityAuditLog.findMany({
where: {
eventType,
timestamp: { gte: sevenDaysAgo, lt: now },
},
});
// Check for unusual time patterns
const currentHour = now.getHours();
const hourlyEvents = (historicalEvents || []).filter(
(e) => e.timestamp.getHours() === currentHour
);
const avgHourlyEvents = hourlyEvents.length / 7; // 7 days average
const recentHourEvents = this.eventBuffer.filter(
(e) =>
e.eventType === eventType &&
e.timestamp.getHours() === currentHour &&
e.timestamp > new Date(now.getTime() - 60 * 60 * 1000)
).length;
// Check for geographical anomalies
if (context.country && context.userId) {
const userCountries = new Set(
(historicalEvents || [])
.filter((e) => e.userId === context.userId && e.country)
.map((e) => e.country)
);
if (userCountries.size > 0 && !userCountries.has(context.country)) {
return {
isAnomaly: true,
confidence: 0.8,
type: "geographical_anomaly",
description: `User accessing from unusual country: ${context.country}`,
recommendedActions: [
"Verify user identity",
"Check for compromised credentials",
"Consider additional authentication",
],
};
}
}
// Check for time-based anomalies
if (recentHourEvents > avgHourlyEvents * 3 && avgHourlyEvents > 0) {
return {
isAnomaly: true,
confidence: 0.7,
type: "temporal_anomaly",
description: `Unusual activity spike: ${recentHourEvents} events vs ${avgHourlyEvents.toFixed(1)} average`,
recommendedActions: [
"Investigate source of increased activity",
"Check for automated attacks",
"Review recent system changes",
],
};
}
return {
isAnomaly: false,
confidence: 0,
type: "normal",
description: "No anomalies detected",
recommendedActions: [],
};
}
private async createAlert(
alertData: Omit<SecurityAlert, "id" | "timestamp" | "acknowledged">
): Promise<void> {
// Check for duplicate suppression
const suppressionWindow = new Date(
Date.now() - this.config.alerting.suppressDuplicateMinutes * 60 * 1000
);
const isDuplicate = this.alerts.some(
(a) =>
a.type === alertData.type &&
a.context.ipAddress === alertData.context.ipAddress &&
a.timestamp > suppressionWindow
);
if (isDuplicate) return;
const alert: SecurityAlert = {
id: crypto.randomUUID(),
timestamp: new Date(),
acknowledged: false,
...alertData,
};
this.alerts.push(alert);
// Log alert creation
await securityAuditLogger.log({
eventType: SecurityEventType.SYSTEM_CONFIG,
action: "security_alert_created",
outcome: AuditOutcome.SUCCESS,
severity: this.mapAlertSeverityToAuditSeverity(alert.severity),
context: alert.context,
});
// Send notifications if enabled
if (this.config.alerting.enabled) {
await this.sendAlertNotifications(alert);
}
}
private async sendAlertNotifications(alert: SecurityAlert): Promise<void> {
// In production, integrate with actual notification services
console.error(
`🚨 SECURITY ALERT [${alert.severity}] ${alert.type}: ${alert.title}`
);
console.error(`Description: ${alert.description}`);
console.error("Context:", alert.context);
// Example integrations you could implement:
// - Email notifications
// - Slack webhooks
// - PagerDuty alerts
// - SMS notifications
// - Custom webhook endpoints
}
private async calculateUserRiskScores(
events: any[]
): Promise<Array<{ userId: string; email: string; riskScore: number }>> {
const userEvents = events.filter((e) => e.userId);
const userScores = new Map<
string,
{ email: string; score: number; events: any[] }
>();
for (const event of userEvents) {
if (!userScores.has(event.userId)) {
userScores.set(event.userId, {
email: event.user?.email || "unknown",
score: 0,
events: [],
});
}
userScores.get(event.userId)?.events.push(event);
}
const riskScores: Array<{
userId: string;
email: string;
riskScore: number;
}> = [];
for (const [userId, userData] of userScores) {
let riskScore = 0;
// Failed authentication attempts
const failedAuth = userData.events.filter(
(e) =>
e.eventType === SecurityEventType.AUTHENTICATION &&
e.outcome === AuditOutcome.FAILURE
).length;
riskScore += failedAuth * 10;
// Rate limit violations
const rateLimited = userData.events.filter(
(e) => e.outcome === AuditOutcome.RATE_LIMITED
).length;
riskScore += rateLimited * 15;
// Critical events
const criticalEvents = userData.events.filter(
(e) => e.severity === AuditSeverity.CRITICAL
).length;
riskScore += criticalEvents * 25;
// Multiple countries
const countries = new Set(
userData.events.map((e) => e.country).filter(Boolean)
);
if (countries.size > 2) riskScore += 20;
// Normalize score to 0-100 range
riskScore = Math.min(100, riskScore);
riskScores.push({
userId,
email: userData.email,
riskScore,
});
}
return riskScores.sort((a, b) => b.riskScore - a.riskScore).slice(0, 10);
}
private calculateSecurityScore(data: {
totalEvents: number;
criticalEvents: number;
activeAlerts: number;
topThreats: Array<{ type: AlertType; count: number }>;
}): number {
let score = 100;
// Deduct points for critical events
score -= Math.min(30, data.criticalEvents * 2);
// Deduct points for active alerts
score -= Math.min(25, data.activeAlerts * 3);
// Deduct points for high-severity threats
const highSeverityThreats = data.topThreats.filter((t) =>
[
AlertType.BRUTE_FORCE_ATTACK,
AlertType.DATA_BREACH_ATTEMPT,
AlertType.PRIVILEGE_ESCALATION,
].includes(t.type)
);
score -= Math.min(
20,
highSeverityThreats.reduce((sum, t) => sum + t.count, 0) * 5
);
// Deduct points for high event volume (potential attacks)
if (data.totalEvents > 1000) {
score -= Math.min(15, (data.totalEvents - 1000) / 100);
}
return Math.max(0, Math.round(score));
}
private determineThreatLevel(
securityScore: number,
activeAlerts: number,
criticalEvents: number
): ThreatLevel {
if (securityScore < 50 || activeAlerts >= 5 || criticalEvents >= 3) {
return ThreatLevel.CRITICAL;
}
if (securityScore < 70 || activeAlerts >= 3 || criticalEvents >= 2) {
return ThreatLevel.HIGH;
}
if (securityScore < 85 || activeAlerts >= 1 || criticalEvents >= 1) {
return ThreatLevel.MODERATE;
}
return ThreatLevel.LOW;
}
private mapConfidenceToSeverity(confidence: number): AlertSeverity {
if (confidence >= 0.9) return AlertSeverity.CRITICAL;
if (confidence >= 0.8) return AlertSeverity.HIGH;
if (confidence >= 0.6) return AlertSeverity.MEDIUM;
return AlertSeverity.LOW;
}
private mapAlertSeverityToAuditSeverity(
severity: AlertSeverity
): AuditSeverity {
switch (severity) {
case AlertSeverity.CRITICAL:
return AuditSeverity.CRITICAL;
case AlertSeverity.HIGH:
return AuditSeverity.HIGH;
case AlertSeverity.MEDIUM:
return AuditSeverity.MEDIUM;
case AlertSeverity.LOW:
return AuditSeverity.LOW;
}
}
private getDefaultConfig(): MonitoringConfig {
return {
thresholds: {
failedLoginsPerMinute: 5,
failedLoginsPerHour: 20,
rateLimitViolationsPerMinute: 10,
cspViolationsPerMinute: 15,
adminActionsPerHour: 25,
massDataAccessThreshold: 100,
suspiciousIPThreshold: 10,
},
alerting: {
enabled: process.env.SECURITY_ALERTING_ENABLED !== "false",
channels: [AlertChannel.EMAIL],
suppressDuplicateMinutes: 10,
escalationTimeoutMinutes: 60,
},
retention: {
alertRetentionDays: 90,
metricsRetentionDays: 365,
},
};
}
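// Note: alerting is on by default; setting SECURITY_ALERTING_ENABLED=false in
// the environment silences notifications while detection keeps running, and
// thresholds can be tuned at runtime via updateConfig above.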
private startBackgroundProcessing(): void {
// Clean up old data every hour
setInterval(
() => {
this.cleanupOldData();
},
60 * 60 * 1000
);
// Process event buffer every 30 seconds
setInterval(() => {
this.processEventBuffer();
}, 30 * 1000);
}
private cleanupEventBuffer(): void {
const oneHourAgo = new Date(Date.now() - 60 * 60 * 1000);
this.eventBuffer = this.eventBuffer.filter(
(e) => e.timestamp >= oneHourAgo
);
}
private cleanupOldData(): void {
const alertCutoff = new Date(
Date.now() -
this.config.retention.alertRetentionDays * 24 * 60 * 60 * 1000
);
this.alerts = this.alerts.filter((a) => a.timestamp >= alertCutoff);
this.cleanupEventBuffer();
}
private async processEventBuffer(): Promise<void> {
// Analyze patterns in event buffer for real-time threat detection
const now = new Date();
const oneMinuteAgo = new Date(now.getTime() - 60 * 1000);
const recentEvents = this.eventBuffer.filter(
(e) => e.timestamp >= oneMinuteAgo
);
// Check for event spikes
if (recentEvents.length > 50) {
await this.createAlert({
severity: AlertSeverity.MEDIUM,
type: AlertType.SUSPICIOUS_IP_ACTIVITY,
title: "High Event Volume Detected",
description: `${recentEvents.length} security events in the last minute`,
eventType: SecurityEventType.API_SECURITY,
context: { requestId: crypto.randomUUID() },
metadata: { eventCount: recentEvents.length },
});
}
}
}
// Singleton instance
export const securityMonitoring = new SecurityMonitoringService();
// Helper function to integrate with existing audit logger
export async function enhancedSecurityLog(
eventType: SecurityEventType,
action: string,
outcome: AuditOutcome,
context: AuditLogContext,
severity: AuditSeverity = AuditSeverity.INFO,
errorMessage?: string,
metadata?: Record<string, any>
): Promise<void> {
// Log to audit system
await securityAuditLogger.log({
eventType,
action,
outcome,
severity,
errorMessage,
context,
});
// Process through security monitoring
await securityMonitoring.processSecurityEvent(
eventType,
outcome,
context,
severity,
metadata
);
}
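// Illustrative call site (a sketch — assumes createAuditContext is imported
// from "./securityAuditLogger" alongside the types above):
//
//   await enhancedSecurityLog(
//     SecurityEventType.AUTHENTICATION,
//     "login_failed",
//     AuditOutcome.FAILURE,
//     await createAuditContext(request),
//     AuditSeverity.MEDIUM,
//     "Invalid credentials"
//   );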

View File

@ -47,7 +47,7 @@ export async function sendEmail(
console.log("📧 [DEV] Email would be sent:", {
to: options.to,
subject: options.subject,
text: options.text?.substring(0, 100) + "...",
text: `${options.text?.substring(0, 100)}...`,
});
return { success: true };
}

View File

@ -13,9 +13,9 @@ import { getServerSession } from "next-auth/next";
import superjson from "superjson";
import type { z } from "zod";
import { authOptions } from "./auth";
import { CSRFProtection } from "./csrf";
import { prisma } from "./prisma";
import { validateInput } from "./validation";
import { CSRFProtection } from "./csrf";
/**
* Create context for tRPC requests
@ -169,7 +169,7 @@ const enforceCSRFProtection = t.middleware(async ({ ctx, next }) => {
method: request.method,
headers: request.headers,
body: request.body,
}) as any;
}) as unknown as NextRequest;
// Validate CSRF token
const validation = await CSRFProtection.validateRequest(nextRequest);
@ -198,7 +198,12 @@ export const rateLimitedProcedure = publicProcedure.use(
/**
* CSRF-protected procedures for state-changing operations
*/
export const csrfProtectedProcedure = publicProcedure.use(enforceCSRFProtection);
export const csrfProtectedAuthProcedure = csrfProtectedProcedure.use(enforceUserIsAuthed);
export const csrfProtectedCompanyProcedure = csrfProtectedProcedure.use(enforceCompanyAccess);
export const csrfProtectedAdminProcedure = csrfProtectedProcedure.use(enforceAdminAccess);
export const csrfProtectedProcedure = publicProcedure.use(
enforceCSRFProtection
);
export const csrfProtectedAuthProcedure =
csrfProtectedProcedure.use(enforceUserIsAuthed);
export const csrfProtectedCompanyProcedure =
csrfProtectedProcedure.use(enforceCompanyAccess);
export const csrfProtectedAdminProcedure =
csrfProtectedProcedure.use(enforceAdminAccess);