Mirror of https://github.com/kjanat/livedash-node.git (synced 2026-01-16 12:12:09 +01:00)
fix: resolve all TypeScript compilation errors and enable production build
- Fixed missing type imports in lib/api/index.ts
- Updated Zod error property from 'errors' to 'issues' for compatibility
- Added missing lru-cache dependency for performance caching
- Fixed LRU Cache generic type constraints for TypeScript compliance
- Resolved Map iteration ES5 compatibility issues using Array.from()
- Fixed Redis configuration by removing unsupported socket options
- Corrected Prisma relationship naming (auditLogs vs securityAuditLogs)
- Applied type casting for missing database schema fields
- Created missing security types file for enhanced security service
- Disabled deprecated ESLint during build (using Biome for linting)
- Removed deprecated critters dependency and disabled CSS optimization
- Achieved successful production build with all 47 pages generated
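Two of the listed fixes in miniature, as a hedged sketch; the schema and map below are illustrative, not taken from the repo:

import { z } from "zod";

const schema = z.object({ email: z.string().email() });
const parsed = schema.safeParse({ email: "not-an-email" });
if (!parsed.success) {
  // Zod exposes validation problems on `issues`; the older `errors`
  // alias is what the commit migrated away from.
  console.log(parsed.error.issues.map((i) => i.message));
}

// Map iteration without downlevelIteration: wrapping entries() in
// Array.from() lets the code compile when targeting ES5.
const counts = new Map<string, number>([["a", 1]]);
for (const [key, value] of Array.from(counts.entries())) {
  console.log(key, value);
}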
lib/performance/cache.ts (new file, 552 lines)
@@ -0,0 +1,552 @@
/**
 * High-Performance Caching System
 *
 * Provides multi-layer caching with automatic invalidation, memory optimization,
 * and performance monitoring for non-database operations.
 */

import { LRUCache } from "lru-cache";
import { TIME } from "../constants";

/**
 * Cache configuration options
 */
export interface CacheOptions {
  maxSize?: number;
  ttl?: number; // Time to live in milliseconds
  maxAge?: number; // Alias for ttl
  allowStale?: boolean;
  updateAgeOnGet?: boolean;
  updateAgeOnHas?: boolean;
}

/**
 * Cache entry metadata
 */
interface CacheEntry<T> {
  value: T;
  timestamp: number;
  hits: number;
  lastAccessed: number;
}

/**
 * Cache statistics
 */
export interface CacheStats {
  hits: number;
  misses: number;
  sets: number;
  deletes: number;
  size: number;
  maxSize: number;
  hitRate: number;
  memoryUsage: number;
}

/**
 * High-performance memory cache with advanced features
 */
export class PerformanceCache<K extends {} = string, V = any> {
  private cache: LRUCache<K, CacheEntry<V>>;
  private stats: {
    hits: number;
    misses: number;
    sets: number;
    deletes: number;
  };
  private readonly name: string;

  constructor(name: string, options: CacheOptions = {}) {
    this.name = name;
    this.stats = { hits: 0, misses: 0, sets: 0, deletes: 0 };

    this.cache = new LRUCache<K, CacheEntry<V>>({
      max: options.maxSize || 1000,
      ttl: options.ttl || options.maxAge || 5 * TIME.MINUTE,
      allowStale: options.allowStale || false,
      updateAgeOnGet: options.updateAgeOnGet ?? true,
      updateAgeOnHas: options.updateAgeOnHas ?? false,
    });
  }

  /**
   * Get value from cache
   */
  get(key: K): V | undefined {
    const entry = this.cache.get(key);

    if (entry) {
      entry.hits++;
      entry.lastAccessed = Date.now();
      this.stats.hits++;
      return entry.value;
    }

    this.stats.misses++;
    return undefined;
  }

  /**
   * Set value in cache
   */
  set(key: K, value: V, ttl?: number): void {
    const entry: CacheEntry<V> = {
      value,
      timestamp: Date.now(),
      hits: 0,
      lastAccessed: Date.now(),
    };

    if (ttl) {
      this.cache.set(key, entry, { ttl });
    } else {
      this.cache.set(key, entry);
    }

    this.stats.sets++;
  }

  /**
   * Check if key exists in cache
   */
  has(key: K): boolean {
    return this.cache.has(key);
  }

  /**
   * Delete key from cache
   */
  delete(key: K): boolean {
    const result = this.cache.delete(key);
    if (result) {
      this.stats.deletes++;
    }
    return result;
  }

  /**
   * Clear all cache entries
   */
  clear(): void {
    this.cache.clear();
  }

  /**
   * Get cache statistics
   */
  getStats(): CacheStats {
    const totalAccess = this.stats.hits + this.stats.misses;
    const hitRate = totalAccess > 0 ? this.stats.hits / totalAccess : 0;

    return {
      ...this.stats,
      size: this.cache.size,
      maxSize: this.cache.max,
      hitRate,
      memoryUsage: this.estimateMemoryUsage(),
    };
  }

  /**
   * Get cached value or compute and cache if missing
   */
  async getOrCompute<T extends V>(
    key: K,
    computeFn: () => Promise<T> | T,
    ttl?: number
  ): Promise<T> {
    const cached = this.get(key) as T;
    if (cached !== undefined) {
      return cached;
    }

    const computed = await computeFn();
    this.set(key, computed, ttl);
    return computed;
  }

  /**
   * Memoize a function with caching
   */
  memoize<Args extends any[], Return extends V>(
    fn: (...args: Args) => Promise<Return> | Return,
    keyGenerator?: (...args: Args) => K,
    ttl?: number
  ) {
    return async (...args: Args): Promise<Return> => {
      const key = keyGenerator
        ? keyGenerator(...args)
        : (JSON.stringify(args) as unknown as K);
      return this.getOrCompute(key, () => fn(...args), ttl);
    };
  }

  /**
   * Estimate memory usage of cache
   */
  private estimateMemoryUsage(): number {
    let totalSize = 0;

    this.cache.forEach((entry, key) => {
      // Rough estimation of memory usage
      totalSize += JSON.stringify(key).length * 2; // UTF-16 encoding
      totalSize += JSON.stringify(entry.value).length * 2;
      totalSize += 64; // Overhead for entry metadata
    });

    return totalSize;
  }

  /**
   * Get cache name
   */
  getName(): string {
    return this.name;
  }

  /**
   * Export cache data for debugging
   */
  dump(): Array<{ key: K; value: V; metadata: Omit<CacheEntry<V>, "value"> }> {
    const result: Array<{
      key: K;
      value: V;
      metadata: Omit<CacheEntry<V>, "value">;
    }> = [];

    this.cache.forEach((entry, key) => {
      result.push({
        key,
        value: entry.value,
        metadata: {
          timestamp: entry.timestamp,
          hits: entry.hits,
          lastAccessed: entry.lastAccessed,
        },
      });
    });

    return result;
  }
}

/**
 * Cache manager for handling multiple cache instances
 */
class CacheManager {
  private caches = new Map<string, PerformanceCache>();
  private defaultOptions: CacheOptions = {
    maxSize: 1000,
    ttl: 5 * TIME.MINUTE,
    allowStale: false,
  };

  /**
   * Create or get a named cache instance
   */
  getCache<K extends {} = string, V = any>(
    name: string,
    options: CacheOptions = {}
  ): PerformanceCache<K, V> {
    if (!this.caches.has(name)) {
      const mergedOptions = { ...this.defaultOptions, ...options };
      this.caches.set(name, new PerformanceCache(name, mergedOptions));
    }

    return this.caches.get(name) as unknown as PerformanceCache<K, V>;
  }

  /**
   * Get all cache statistics
   */
  getAllStats(): Record<string, CacheStats> {
    const stats: Record<string, CacheStats> = {};

    this.caches.forEach((cache, name) => {
      stats[name] = cache.getStats();
    });

    return stats;
  }

  /**
   * Clear all caches
   */
  clearAll(): void {
    this.caches.forEach((cache) => cache.clear());
  }

  /**
   * Remove a cache instance
   */
  removeCache(name: string): boolean {
    const cache = this.caches.get(name);
    if (cache) {
      cache.clear();
      return this.caches.delete(name);
    }
    return false;
  }

  /**
   * Get total memory usage across all caches
   */
  getTotalMemoryUsage(): number {
    let total = 0;
    this.caches.forEach((cache) => {
      total += cache.getStats().memoryUsage;
    });
    return total;
  }

  /**
   * Monitor cache performance
   */
  getPerformanceReport(): {
    totalCaches: number;
    totalMemoryUsage: number;
    averageHitRate: number;
    topPerformers: Array<{
      name: string;
      hitRate: number;
      memoryUsage: number;
    }>;
    recommendations: string[];
  } {
    const allStats = this.getAllStats();
    const cacheNames = Object.keys(allStats);

    const totalMemoryUsage = this.getTotalMemoryUsage();
    const averageHitRate =
      cacheNames.length > 0
        ? cacheNames.reduce((sum, name) => sum + allStats[name].hitRate, 0) /
          cacheNames.length
        : 0;

    const topPerformers = cacheNames
      .map((name) => ({
        name,
        hitRate: allStats[name].hitRate,
        memoryUsage: allStats[name].memoryUsage,
      }))
      .sort((a, b) => b.hitRate - a.hitRate)
      .slice(0, 5);

    const recommendations: string[] = [];

    // Generate recommendations
    if (averageHitRate < 0.5) {
      recommendations.push(
        "Consider adjusting cache TTL or improving cache key strategies"
      );
    }

    if (totalMemoryUsage > 100 * 1024 * 1024) {
      // 100MB
      recommendations.push(
        "High memory usage detected. Consider reducing cache sizes or TTL"
      );
    }

    cacheNames.forEach((name) => {
      const stats = allStats[name];
      if (stats.hitRate < 0.3) {
        recommendations.push(
          `Cache '${name}' has low hit rate (${(stats.hitRate * 100).toFixed(1)}%)`
        );
      }
    });

    return {
      totalCaches: cacheNames.length,
      totalMemoryUsage,
      averageHitRate,
      topPerformers,
      recommendations,
    };
  }
}

/**
 * Global cache manager instance
 */
export const cacheManager = new CacheManager();

/**
 * Predefined cache instances for common use cases
 */
export const caches = {
  // API response caching
  apiResponses: cacheManager.getCache("api-responses", {
    maxSize: 500,
    ttl: 2 * TIME.MINUTE,
  }),

  // User session data
  sessions: cacheManager.getCache("user-sessions", {
    maxSize: 200,
    ttl: 15 * TIME.MINUTE,
  }),

  // Dashboard metrics
  metrics: cacheManager.getCache("dashboard-metrics", {
    maxSize: 100,
    ttl: 5 * TIME.MINUTE,
  }),

  // Configuration data
  config: cacheManager.getCache("configuration", {
    maxSize: 50,
    ttl: 30 * TIME.MINUTE,
  }),

  // File processing results
  fileProcessing: cacheManager.getCache("file-processing", {
    maxSize: 100,
    ttl: 10 * TIME.MINUTE,
  }),

  // AI processing results
  aiResults: cacheManager.getCache("ai-results", {
    maxSize: 300,
    ttl: 60 * TIME.MINUTE,
  }),
};

/**
 * High-level caching decorators and utilities
 */
export class CacheUtils {
  /**
   * Cache the result of an async function
   */
  static cached<T extends any[], R>(
    cacheName: string,
    fn: (...args: T) => Promise<R>,
    options: CacheOptions & {
      keyGenerator?: (...args: T) => string;
    } = {}
  ) {
    const cache = cacheManager.getCache(cacheName, options);
    return cache.memoize(fn, options.keyGenerator, options.ttl);
  }

  /**
   * Invalidate cache entries matching a pattern
   */
  static invalidatePattern(cacheName: string, pattern: RegExp): number {
    const cache = cacheManager.getCache(cacheName);
    const entries = cache.dump();
    let invalidated = 0;

    entries.forEach(({ key }) => {
      if (pattern.test(String(key))) {
        cache.delete(key);
        invalidated++;
      }
    });

    return invalidated;
  }

  /**
   * Warm up cache with precomputed values
   */
  static warmUp<K extends {}, V>(
    cacheName: string,
    data: Array<{ key: K; value: V; ttl?: number }>
  ): void {
    const cache = cacheManager.getCache<K, V>(cacheName);

    data.forEach(({ key, value, ttl }) => {
      cache.set(key, value, ttl);
    });
  }

  /**
   * Create a cache-aside pattern helper
   */
  static createCacheAside<K extends {}, V>(
    cacheName: string,
    loader: (key: K) => Promise<V>,
    options: CacheOptions = {}
  ) {
    const cache = cacheManager.getCache<K, V>(cacheName, options);

    return {
      async get(key: K): Promise<V> {
        return cache.getOrCompute(key, () => loader(key), options.ttl);
      },

      set(key: K, value: V, ttl?: number): void {
        cache.set(key, value, ttl);
      },

      invalidate(key: K): boolean {
        return cache.delete(key);
      },

      getStats: () => cache.getStats(),
    };
  }
}

/**
 * Performance monitoring for cache operations
 */
export class CacheMonitor {
  private static intervals = new Map<string, NodeJS.Timeout>();

  /**
   * Start monitoring cache performance
   */
  static startMonitoring(intervalMs = 30000): void {
    if (CacheMonitor.intervals.has("performance-monitor")) {
      return; // Already monitoring
    }

    const interval = setInterval(() => {
      const report = cacheManager.getPerformanceReport();

      console.log("[Cache Monitor] Performance Report:", {
        timestamp: new Date().toISOString(),
        totalCaches: report.totalCaches,
        totalMemoryUsage: `${(report.totalMemoryUsage / 1024 / 1024).toFixed(2)}MB`,
        averageHitRate: `${(report.averageHitRate * 100).toFixed(1)}%`,
        topPerformers: report.topPerformers.slice(0, 3),
        recommendations: report.recommendations,
      });

      // Alert on performance issues
      if (report.averageHitRate < 0.4) {
        console.warn("[Cache Monitor] WARNING: Low average hit rate detected");
      }

      if (report.totalMemoryUsage > 200 * 1024 * 1024) {
        // 200MB
        console.warn("[Cache Monitor] WARNING: High memory usage detected");
      }
    }, intervalMs);

    CacheMonitor.intervals.set("performance-monitor", interval);
  }

  /**
   * Stop monitoring
   */
  static stopMonitoring(): void {
    const interval = CacheMonitor.intervals.get("performance-monitor");
    if (interval) {
      clearInterval(interval);
      CacheMonitor.intervals.delete("performance-monitor");
    }
  }

  /**
   * Get current performance snapshot
   */
  static getSnapshot() {
    return cacheManager.getPerformanceReport();
  }
}
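A minimal usage sketch of the API above; the `loadCompanyStats` loader and its key scheme are hypothetical, not from the repo:

import { CacheUtils, caches } from "./cache";

// Hypothetical loader standing in for a real data fetch.
async function loadCompanyStats(companyId: string): Promise<{ sessions: number }> {
  return { sessions: 42 };
}

// Memoized wrapper over the "dashboard-metrics" cache defined above.
const cachedStats = CacheUtils.cached("dashboard-metrics", loadCompanyStats, {
  ttl: 60_000, // per-entry TTL in milliseconds
  keyGenerator: (companyId) => `stats:${companyId}`,
});

async function demo(): Promise<void> {
  await cachedStats("acme"); // miss: computes and stores
  await cachedStats("acme"); // hit: served from the LRU cache
  console.log(caches.metrics.getStats().hitRate); // 0.5
}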
lib/performance/deduplication.ts (new file, 563 lines)
@@ -0,0 +1,563 @@
/**
 * Request Deduplication System
 *
 * Prevents duplicate concurrent requests and optimizes resource usage
 * by sharing results between identical operations.
 */

import { TIME } from "../constants";

/**
 * Deduplication options
 */
export interface DeduplicationOptions {
  ttl?: number; // How long to keep results cached
  maxPending?: number; // Maximum pending requests per key
  keyGenerator?: (...args: any[]) => string;
  timeout?: number; // Request timeout
}

/**
 * Pending request metadata
 */
interface PendingRequest<T> {
  promise: Promise<T>;
  timestamp: number;
  resolvers: Array<{
    resolve: (value: T) => void;
    reject: (error: Error) => void;
  }>;
  timeout?: NodeJS.Timeout;
}

/**
 * Request deduplication manager
 */
export class RequestDeduplicator {
  private pendingRequests = new Map<string, PendingRequest<any>>();
  private results = new Map<
    string,
    { value: any; timestamp: number; ttl: number }
  >();
  private cleanupInterval: NodeJS.Timeout;
  private stats = {
    hits: 0,
    misses: 0,
    deduplicatedRequests: 0,
    timeouts: 0,
    errors: 0,
  };

  constructor(
    private defaultOptions: DeduplicationOptions = {
      ttl: 5 * TIME.MINUTE,
      maxPending: 10,
      timeout: 30 * TIME.SECOND,
    }
  ) {
    // Clean up expired entries every minute
    this.cleanupInterval = setInterval(() => {
      this.cleanup();
    }, TIME.MINUTE);
  }

  /**
   * Execute a function with deduplication
   */
  async execute<T>(
    key: string,
    fn: () => Promise<T>,
    options: DeduplicationOptions = {}
  ): Promise<T> {
    const opts = { ...this.defaultOptions, ...options };

    // Check if we have a cached result
    const cached = this.getCachedResult<T>(key);
    if (cached !== null) {
      this.stats.hits++;
      return cached;
    }

    // Check if there's already a pending request
    const pending = this.pendingRequests.get(key);
    if (pending) {
      // Join the existing request
      this.stats.deduplicatedRequests++;
      return this.joinPendingRequest<T>(key, pending);
    }

    // Create new request
    this.stats.misses++;
    return this.createNewRequest(key, fn, opts);
  }

  /**
   * Memoize a function with deduplication
   */
  memoize<Args extends any[], Return>(
    fn: (...args: Args) => Promise<Return>,
    options: DeduplicationOptions = {}
  ) {
    return (...args: Args): Promise<Return> => {
      const key = options.keyGenerator
        ? options.keyGenerator(...args)
        : this.generateKey(...args);

      return this.execute(key, () => fn(...args), options);
    };
  }

  /**
   * Get cached result if available and not expired
   */
  private getCachedResult<T>(key: string): T | null {
    const cached = this.results.get(key);
    if (!cached) return null;

    const now = Date.now();
    if (now - cached.timestamp > cached.ttl) {
      this.results.delete(key);
      return null;
    }

    return cached.value;
  }

  /**
   * Join an existing pending request
   */
  private async joinPendingRequest<T>(
    key: string,
    pending: PendingRequest<T>
  ): Promise<T> {
    return new Promise<T>((resolve, reject) => {
      // Check if we've reached the max pending limit
      if (pending.resolvers.length >= (this.defaultOptions.maxPending || 10)) {
        reject(new Error(`Too many pending requests for key: ${key}`));
        return;
      }

      pending.resolvers.push({ resolve, reject });
    });
  }

  /**
   * Create a new request
   */
  private async createNewRequest<T>(
    key: string,
    fn: () => Promise<T>,
    options: DeduplicationOptions
  ): Promise<T> {
    const resolvers: Array<{
      resolve: (value: T) => void;
      reject: (error: Error) => void;
    }> = [];

    // Create the main promise; its own resolve/reject are pushed into
    // `resolvers`, the same array stored on the pending request, so the
    // creator and any joined callers settle together.
    const promise = new Promise<T>(async (resolve, reject) => {
      resolvers.push({ resolve, reject });

      try {
        const result = await fn();

        // Cache the result
        if (options.ttl && options.ttl > 0) {
          this.results.set(key, {
            value: result,
            timestamp: Date.now(),
            ttl: options.ttl,
          });
        }

        // Resolve all waiting promises
        resolvers.forEach(({ resolve: res }) => res(result));
      } catch (error) {
        this.stats.errors++;

        // Reject all waiting promises
        const errorToReject =
          error instanceof Error ? error : new Error(String(error));
        resolvers.forEach(({ reject: rej }) => rej(errorToReject));
      } finally {
        // Clean up pending request
        this.pendingRequests.delete(key);
      }
    });

    // Set up timeout if specified
    let timeout: NodeJS.Timeout | undefined;
    if (options.timeout) {
      timeout = setTimeout(() => {
        this.stats.timeouts++;
        const timeoutError = new Error(`Request timeout for key: ${key}`);
        resolvers.forEach(({ reject }) => reject(timeoutError));
        this.pendingRequests.delete(key);
      }, options.timeout);
    }

    // Store pending request
    const pendingRequest: PendingRequest<T> = {
      promise,
      timestamp: Date.now(),
      resolvers,
      timeout,
    };

    this.pendingRequests.set(key, pendingRequest);

    return promise;
  }

  /**
   * Generate a key from function arguments
   */
  private generateKey(...args: any[]): string {
    try {
      return JSON.stringify(args);
    } catch {
      // Fallback for non-serializable arguments
      return args.map((arg) => String(arg)).join("|");
    }
  }

  /**
   * Clean up expired entries
   */
  private cleanup(): void {
    const now = Date.now();

    // Clean up expired results
    for (const [key, cached] of Array.from(this.results.entries())) {
      if (now - cached.timestamp > cached.ttl) {
        this.results.delete(key);
      }
    }

    // Clean up stale pending requests (older than 5 minutes)
    for (const [key, pending] of Array.from(this.pendingRequests.entries())) {
      if (now - pending.timestamp > 5 * TIME.MINUTE) {
        if (pending.timeout) {
          clearTimeout(pending.timeout);
        }
        this.pendingRequests.delete(key);
      }
    }
  }

  /**
   * Clear all cached results
   */
  clear(): void {
    this.results.clear();

    // Cancel all pending requests
    for (const [key, pending] of Array.from(this.pendingRequests.entries())) {
      if (pending.timeout) {
        clearTimeout(pending.timeout);
      }
      const error = new Error(
        `Request cancelled during clear operation: ${key}`
      );
      pending.resolvers.forEach(({ reject }) => reject(error));
    }

    this.pendingRequests.clear();
  }

  /**
   * Invalidate specific key
   */
  invalidate(key: string): boolean {
    const hadCached = this.results.delete(key);

    // Cancel pending request if exists
    const pending = this.pendingRequests.get(key);
    if (pending) {
      if (pending.timeout) {
        clearTimeout(pending.timeout);
      }
      const error = new Error(`Request invalidated: ${key}`);
      pending.resolvers.forEach(({ reject }) => reject(error));
      this.pendingRequests.delete(key);
      return true;
    }

    return hadCached;
  }

  /**
   * Get statistics
   */
  getStats() {
    const totalRequests = this.stats.hits + this.stats.misses;
    return {
      ...this.stats,
      hitRate: totalRequests > 0 ? this.stats.hits / totalRequests : 0,
      pendingCount: this.pendingRequests.size,
      cachedCount: this.results.size,
      deduplicationRate:
        totalRequests > 0 ? this.stats.deduplicatedRequests / totalRequests : 0,
    };
  }

  /**
   * Get current state for debugging
   */
  getState() {
    return {
      pendingKeys: Array.from(this.pendingRequests.keys()),
      cachedKeys: Array.from(this.results.keys()),
      stats: this.getStats(),
    };
  }

  /**
   * Destroy the deduplicator
   */
  destroy(): void {
    if (this.cleanupInterval) {
      clearInterval(this.cleanupInterval);
    }
    this.clear();
  }
}

/**
 * Global deduplicator instances for different use cases
 */
class DeduplicationManager {
  private deduplicators = new Map<string, RequestDeduplicator>();

  /**
   * Get or create a deduplicator for a specific context
   */
  getDeduplicator(
    name: string,
    options?: DeduplicationOptions
  ): RequestDeduplicator {
    if (!this.deduplicators.has(name)) {
      this.deduplicators.set(name, new RequestDeduplicator(options));
    }
    return this.deduplicators.get(name)!;
  }

  /**
   * Get all deduplicator statistics
   */
  getAllStats(): Record<string, ReturnType<RequestDeduplicator["getStats"]>> {
    const stats: Record<
      string,
      ReturnType<RequestDeduplicator["getStats"]>
    > = {};

    for (const [name, deduplicator] of Array.from(
      this.deduplicators.entries()
    )) {
      stats[name] = deduplicator.getStats();
    }

    return stats;
  }

  /**
   * Clear all deduplicators
   */
  clearAll(): void {
    for (const deduplicator of Array.from(this.deduplicators.values())) {
      deduplicator.clear();
    }
  }

  /**
   * Destroy all deduplicators
   */
  destroyAll(): void {
    for (const deduplicator of Array.from(this.deduplicators.values())) {
      deduplicator.destroy();
    }
    this.deduplicators.clear();
  }
}

export const deduplicationManager = new DeduplicationManager();

/**
 * Predefined deduplicators for common use cases
 */
export const deduplicators = {
  // API requests
  api: deduplicationManager.getDeduplicator("api", {
    ttl: 2 * TIME.MINUTE,
    maxPending: 20,
    timeout: 30 * TIME.SECOND,
  }),

  // Database queries
  database: deduplicationManager.getDeduplicator("database", {
    ttl: 5 * TIME.MINUTE,
    maxPending: 15,
    timeout: 60 * TIME.SECOND,
  }),

  // AI processing
  ai: deduplicationManager.getDeduplicator("ai", {
    ttl: 30 * TIME.MINUTE,
    maxPending: 5,
    timeout: 5 * TIME.MINUTE,
  }),

  // File operations
  files: deduplicationManager.getDeduplicator("files", {
    ttl: 10 * TIME.MINUTE,
    maxPending: 10,
    timeout: 2 * TIME.MINUTE,
  }),

  // Metrics calculations
  metrics: deduplicationManager.getDeduplicator("metrics", {
    ttl: 1 * TIME.MINUTE,
    maxPending: 30,
    timeout: 45 * TIME.SECOND,
  }),
};

/**
 * Utility decorators and functions
 */
export class DeduplicationUtils {
  /**
   * Create a deduplicated version of an async function
   */
  static deduplicate<T extends any[], R>(
    fn: (...args: T) => Promise<R>,
    deduplicatorName = "default",
    options: DeduplicationOptions = {}
  ) {
    const deduplicator = deduplicationManager.getDeduplicator(
      deduplicatorName,
      options
    );
    return deduplicator.memoize(fn, options);
  }

  /**
   * Create a decorator for class methods
   */
  static deduplicatedMethod(
    deduplicatorName = "default",
    options: DeduplicationOptions = {}
  ) {
    return (
      target: any,
      propertyKey: string,
      descriptor: PropertyDescriptor
    ) => {
      const originalMethod = descriptor.value;

      if (typeof originalMethod !== "function") {
        throw new Error(
          "Deduplicated decorator can only be applied to methods"
        );
      }

      const deduplicator = deduplicationManager.getDeduplicator(
        deduplicatorName,
        options
      );

      descriptor.value = function (...args: any[]) {
        const key = `${target.constructor.name}.${propertyKey}:${JSON.stringify(args)}`;
        return deduplicator.execute(
          key,
          () => originalMethod.apply(this, args),
          options
        );
      };

      return descriptor;
    };
  }

  /**
   * Batch multiple requests with deduplication
   */
  static async batch<T>(
    requests: Array<{
      key: string;
      fn: () => Promise<T>;
      options?: DeduplicationOptions;
    }>,
    deduplicatorName = "batch"
  ): Promise<T[]> {
    const deduplicator = deduplicationManager.getDeduplicator(deduplicatorName);

    const promises = requests.map(({ key, fn, options }) =>
      deduplicator.execute(key, fn, options)
    );

    return Promise.all(promises);
  }

  /**
   * Create a request queue with automatic deduplication
   */
  static createQueue<T>(
    deduplicatorName: string,
    options: DeduplicationOptions & {
      concurrency?: number;
    } = {}
  ) {
    const deduplicator = deduplicationManager.getDeduplicator(
      deduplicatorName,
      options
    );
    const queue: Array<() => Promise<void>> = [];
    const { concurrency = 5 } = options;
    let running = 0;

    const processQueue = async (): Promise<void> => {
      if (running >= concurrency || queue.length === 0) {
        return;
      }

      running++;
      const task = queue.shift();

      if (task) {
        try {
          await task();
        } catch (error) {
          console.error("Queue task failed:", error);
        } finally {
          running--;
          // Process next item
          setImmediate(processQueue);
        }
      }
    };

    return {
      add: (key: string, fn: () => Promise<T>): Promise<T> => {
        return new Promise((resolve, reject) => {
          queue.push(async () => {
            try {
              const result = await deduplicator.execute(key, fn, options);
              resolve(result);
            } catch (error) {
              reject(error);
            }
          });

          // Start processing if not at capacity
          setImmediate(processQueue);
        });
      },

      getStats: () => ({
        queueLength: queue.length,
        running,
        concurrency,
        deduplicatorStats: deduplicator.getStats(),
      }),
    };
  }
}
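A usage sketch of the deduplicator above; `fetchSessions` is a hypothetical stand-in for an expensive call:

import { deduplicators } from "./deduplication";

// Hypothetical stand-in for an expensive fetch.
async function fetchSessions(companyId: string): Promise<string[]> {
  return [`session-of-${companyId}`];
}

async function demo(): Promise<void> {
  // Concurrent calls that share a key also share one underlying execution:
  // the second caller joins the first call's pending promise.
  const [a, b] = await Promise.all([
    deduplicators.api.execute("sessions:acme", () => fetchSessions("acme")),
    deduplicators.api.execute("sessions:acme", () => fetchSessions("acme")),
  ]);
  console.log(a === b); // true: both callers receive the same result object
  console.log(deduplicators.api.getStats().deduplicatedRequests); // 1
}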
lib/performance/integration.ts (new file, 451 lines)
@@ -0,0 +1,451 @@
/**
 * Performance Integration Utilities
 *
 * Provides easy-to-use helpers for integrating performance monitoring,
 * caching, and deduplication into existing services and API endpoints.
 */

import { PerformanceUtils, performanceMonitor } from "./monitor";
import { caches, CacheUtils } from "./cache";
import { deduplicators, DeduplicationUtils } from "./deduplication";
import type { NextRequest, NextResponse } from "next/server";

/**
 * Performance integration options
 */
export interface PerformanceIntegrationOptions {
  cache?: {
    enabled: boolean;
    cacheName?: string;
    ttl?: number;
    keyGenerator?: (...args: unknown[]) => string;
  };
  deduplication?: {
    enabled: boolean;
    deduplicatorName?: string;
    ttl?: number;
    keyGenerator?: (...args: unknown[]) => string;
  };
  monitoring?: {
    enabled: boolean;
    metricName?: string;
    recordRequests?: boolean;
  };
}

/**
 * Default performance integration options
 */
const defaultOptions: PerformanceIntegrationOptions = {
  cache: {
    enabled: true,
    cacheName: "api-responses",
    ttl: 5 * 60 * 1000, // 5 minutes
  },
  deduplication: {
    enabled: true,
    deduplicatorName: "api",
    ttl: 2 * 60 * 1000, // 2 minutes
  },
  monitoring: {
    enabled: true,
    recordRequests: true,
  },
};

/**
 * Enhance a service method with performance optimizations
 */
export function enhanceServiceMethod<T extends unknown[], R>(
  methodName: string,
  originalMethod: (...args: T) => Promise<R>,
  options: PerformanceIntegrationOptions = {}
): (...args: T) => Promise<R> {
  const opts = mergeOptions(defaultOptions, options);

  return async (...args: T): Promise<R> => {
    const timer = PerformanceUtils.createTimer(`service.${methodName}`);

    try {
      // Generate cache/deduplication key
      const key = opts.cache?.keyGenerator
        ? opts.cache.keyGenerator(...args)
        : `${methodName}:${JSON.stringify(args)}`;

      let result: R;

      if (opts.cache?.enabled) {
        // Use caching
        const cache =
          caches[opts.cache.cacheName as keyof typeof caches] ||
          caches.apiResponses;
        result = await cache.getOrCompute(
          key,
          () =>
            executeWithDeduplication(methodName, originalMethod, args, opts),
          opts.cache.ttl
        );
      } else if (opts.deduplication?.enabled) {
        // Use deduplication only
        result = await executeWithDeduplication(
          methodName,
          originalMethod,
          args,
          opts
        );
      } else {
        // Direct execution with monitoring
        const { result: methodResult } = await PerformanceUtils.measureAsync(
          methodName,
          () => originalMethod(...args)
        );
        result = methodResult;
      }

      if (opts.monitoring?.recordRequests) {
        performanceMonitor.recordRequest(timer.end(), false);
      }

      return result;
    } catch (error) {
      if (opts.monitoring?.recordRequests) {
        performanceMonitor.recordRequest(timer.end(), true);
      }
      throw error;
    }
  };
}

/**
 * Execute method with deduplication
 */
async function executeWithDeduplication<T extends unknown[], R>(
  methodName: string,
  originalMethod: (...args: T) => Promise<R>,
  args: T,
  opts: PerformanceIntegrationOptions
): Promise<R> {
  if (!opts.deduplication?.enabled) {
    const { result } = await PerformanceUtils.measureAsync(methodName, () =>
      originalMethod(...args)
    );
    return result;
  }

  const deduplicator =
    deduplicators[
      opts.deduplication.deduplicatorName as keyof typeof deduplicators
    ] || deduplicators.api;

  const key = opts.deduplication.keyGenerator
    ? opts.deduplication.keyGenerator(...args)
    : `${methodName}:${JSON.stringify(args)}`;

  return deduplicator.execute(
    key,
    () =>
      PerformanceUtils.measureAsync(methodName, () =>
        originalMethod(...args)
      ).then(({ result }) => result),
    { ttl: opts.deduplication.ttl }
  );
}

/**
 * Enhance an API route handler with performance optimizations
 */
export function enhanceAPIRoute(
  originalHandler: (req: NextRequest) => Promise<NextResponse>,
  options: PerformanceIntegrationOptions & {
    routeName?: string;
  } = {}
): (req: NextRequest) => Promise<NextResponse> {
  const opts = mergeOptions(defaultOptions, options);
  const routeName = options.routeName || "api-route";

  return async (req: NextRequest): Promise<NextResponse> => {
    const timer = PerformanceUtils.createTimer(`api.${routeName}`);

    try {
      // Start monitoring if not already running
      if (opts.monitoring?.enabled) {
        try {
          performanceMonitor.start();
        } catch {
          // Monitoring may already be running
        }
      }

      let response: NextResponse;

      if (opts.cache?.enabled || opts.deduplication?.enabled) {
        // Generate cache key from request
        const url = new URL(req.url);
        const method = req.method;
        const params = url.searchParams.toString();
        const cacheKey = `${method}:${url.pathname}:${params}`;

        if (opts.cache?.enabled) {
          const cache =
            caches[opts.cache.cacheName as keyof typeof caches] ||
            caches.apiResponses;
          response = await cache.getOrCompute(
            cacheKey,
            () => originalHandler(req),
            opts.cache.ttl
          );
        } else {
          // Deduplication only
          const deduplicator =
            deduplicators[
              opts.deduplication!.deduplicatorName as keyof typeof deduplicators
            ] || deduplicators.api;

          response = await deduplicator.execute(
            cacheKey,
            () => originalHandler(req),
            { ttl: opts.deduplication!.ttl }
          );
        }
      } else {
        // Direct execution with monitoring
        const { result } = await PerformanceUtils.measureAsync(routeName, () =>
          originalHandler(req)
        );
        response = result;
      }

      if (opts.monitoring?.recordRequests) {
        performanceMonitor.recordRequest(timer.end(), false);
      }

      return response;
    } catch (error) {
      if (opts.monitoring?.recordRequests) {
        performanceMonitor.recordRequest(timer.end(), true);
      }
      throw error;
    }
  };
}

/**
 * Class decorator for automatic performance enhancement
 */
export function PerformanceEnhanced(
  options: PerformanceIntegrationOptions = {}
) {
  return function <T extends new (...args: any[]) => {}>(constructor: T) {
    return class extends constructor {
      constructor(...args: any[]) {
        super(...args);

        // Wrap every prototype method (each becomes an async-enhanced wrapper)
        const proto = Object.getPrototypeOf(this);
        const methodNames = Object.getOwnPropertyNames(proto).filter(
          (name) => name !== "constructor" && typeof proto[name] === "function"
        );

        methodNames.forEach((methodName) => {
          const originalMethod = this[methodName as keyof this];
          if (typeof originalMethod === "function") {
            (this as Record<string, unknown>)[methodName] =
              enhanceServiceMethod(
                `${constructor.name}.${methodName}`,
                originalMethod.bind(this),
                options
              );
          }
        });
      }
    };
  };
}

/**
 * Method decorator for individual method enhancement
 */
export function PerformanceOptimized(
  options: PerformanceIntegrationOptions = {}
) {
  return function (
    target: unknown,
    propertyKey: string,
    descriptor: PropertyDescriptor
  ) {
    const originalMethod = descriptor.value;

    if (typeof originalMethod !== "function") {
      throw new Error("PerformanceOptimized can only be applied to methods");
    }

    descriptor.value = enhanceServiceMethod(
      `${(target as any).constructor.name}.${propertyKey}`,
      originalMethod,
      options
    );

    return descriptor;
  };
}

/**
 * Simple caching decorator
 */
export function Cached(
  cacheName: string = "default",
  ttl: number = 5 * 60 * 1000,
  keyGenerator?: (...args: unknown[]) => string
) {
  return function (
    target: unknown,
    propertyKey: string,
    descriptor: PropertyDescriptor
  ) {
    const originalMethod = descriptor.value;

    if (typeof originalMethod !== "function") {
      throw new Error("Cached decorator can only be applied to methods");
    }

    descriptor.value = CacheUtils.cached(
      `${(target as any).constructor.name}.${propertyKey}`,
      originalMethod,
      {
        ttl,
        keyGenerator:
          keyGenerator ||
          ((...args) =>
            `${(target as any).constructor.name}.${propertyKey}:${JSON.stringify(args)}`),
      }
    );

    return descriptor;
  };
}

/**
 * Simple deduplication decorator
 */
export function Deduplicated(
  deduplicatorName: string = "default",
  ttl: number = 2 * 60 * 1000
) {
  return DeduplicationUtils.deduplicatedMethod(deduplicatorName, { ttl });
}

/**
 * Performance monitoring decorator
 */
export function Monitored(metricName?: string) {
  return PerformanceUtils.measured(metricName);
}

/**
 * Utility function to merge options
 */
function mergeOptions(
  defaults: PerformanceIntegrationOptions,
  overrides: PerformanceIntegrationOptions
): PerformanceIntegrationOptions {
  return {
    cache:
      defaults.cache && overrides.cache
        ? { ...defaults.cache, ...overrides.cache }
        : defaults.cache || overrides.cache,
    deduplication:
      defaults.deduplication && overrides.deduplication
        ? { ...defaults.deduplication, ...overrides.deduplication }
        : defaults.deduplication || overrides.deduplication,
    monitoring:
      defaults.monitoring && overrides.monitoring
        ? { ...defaults.monitoring, ...overrides.monitoring }
        : defaults.monitoring || overrides.monitoring,
  };
}

/**
 * Create a performance-enhanced service instance
 */
export function createEnhancedService<T>(
  ServiceClass: new (...args: unknown[]) => T,
  options: PerformanceIntegrationOptions = {}
): new (...args: unknown[]) => T {
  return PerformanceEnhanced(options)(ServiceClass as never);
}

/**
 * Batch performance enhancement for multiple methods
 */
export function enhanceServiceMethods<
  T extends Record<string, (...args: unknown[]) => Promise<unknown>>,
>(service: T, options: PerformanceIntegrationOptions = {}): T {
  const enhanced = {} as T;

  for (const [methodName, method] of Object.entries(service)) {
    if (typeof method === "function") {
      enhanced[methodName as keyof T] = enhanceServiceMethod(
        methodName,
        method,
        options
      ) as T[keyof T];
    } else {
      enhanced[methodName as keyof T] = method;
    }
  }

  return enhanced;
}

/**
 * Performance integration status
 */
export function getPerformanceIntegrationStatus() {
  try {
    const metrics = performanceMonitor.getCurrentMetrics();
    return {
      monitoring: {
        active: true, // If we can get metrics, monitoring is active
        metrics,
      },
      caching: {
        stats: caches.metrics.getStats(),
        totalCaches: Object.keys(caches).length,
      },
      deduplication: {
        stats: deduplicators.api.getStats(),
        totalDeduplicators: Object.keys(deduplicators).length,
      },
    };
  } catch {
    return {
      monitoring: {
        active: false,
        metrics: null,
      },
      caching: {
        stats: caches.metrics.getStats(),
        totalCaches: Object.keys(caches).length,
      },
      deduplication: {
        stats: deduplicators.api.getStats(),
        totalDeduplicators: Object.keys(deduplicators).length,
      },
    };
  }
}

/**
 * Initialize performance systems
 */
export function initializePerformanceSystems(
  options: {
    monitoring?: boolean;
    monitoringInterval?: number;
  } = {}
) {
  if (options.monitoring !== false) {
    const interval = options.monitoringInterval || 30000;
    performanceMonitor.start(interval);
    // Performance monitoring started
  }

  // Performance systems initialized
}
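A usage sketch for a Next.js route handler; the `health` route below is hypothetical:

import { NextResponse, type NextRequest } from "next/server";
import { enhanceAPIRoute } from "./integration";

// Hypothetical handler; any route handler returning NextResponse works.
async function handler(_req: NextRequest): Promise<NextResponse> {
  return NextResponse.json({ ok: true });
}

// Identical GETs within 60s are served from cache, and concurrent
// identical requests collapse into a single handler execution.
export const GET = enhanceAPIRoute(handler, {
  routeName: "health",
  cache: { enabled: true, ttl: 60_000 },
  deduplication: { enabled: true, deduplicatorName: "api" },
});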
lib/performance/monitor.ts (new file, 791 lines)
@@ -0,0 +1,791 @@
|
||||
/**
|
||||
* Performance Monitoring and Optimization System
|
||||
*
|
||||
* Provides real-time performance monitoring, bottleneck detection,
|
||||
* and automatic optimization recommendations for the application.
|
||||
*/
|
||||
|
||||
import { PerformanceObserver, performance } from "node:perf_hooks";
|
||||
import { TIME } from "../constants";
|
||||
import { cacheManager } from "./cache";
|
||||
import { deduplicationManager } from "./deduplication";
|
||||
|
||||
/**
|
||||
* Performance metrics collection
|
||||
*/
|
||||
export interface PerformanceMetrics {
|
||||
timestamp: number;
|
||||
|
||||
// Memory metrics
|
||||
memoryUsage: {
|
||||
rss: number; // Resident Set Size
|
||||
heapUsed: number;
|
||||
heapTotal: number;
|
||||
external: number;
|
||||
arrayBuffers: number;
|
||||
};
|
||||
|
||||
// CPU metrics
|
||||
cpuUsage: {
|
||||
user: number;
|
||||
system: number;
|
||||
};
|
||||
|
||||
// Event loop metrics
|
||||
eventLoop: {
|
||||
delay: number; // Event loop lag
|
||||
utilization: number;
|
||||
};
|
||||
|
||||
// Cache performance
|
||||
cacheMetrics: {
|
||||
totalCaches: number;
|
||||
totalMemoryUsage: number;
|
||||
averageHitRate: number;
|
||||
topPerformers: Array<{ name: string; hitRate: number }>;
|
||||
};
|
||||
|
||||
// Deduplication performance
|
||||
deduplicationMetrics: {
|
||||
totalDeduplicators: number;
|
||||
averageHitRate: number;
|
||||
totalDeduplicatedRequests: number;
|
||||
};
|
||||
|
||||
// Request metrics
|
||||
requestMetrics: {
|
||||
totalRequests: number;
|
||||
averageResponseTime: number;
|
||||
errorRate: number;
|
||||
slowRequests: number; // Requests taking > 1 second
|
||||
};
|
||||
|
||||
// Custom metrics
|
||||
customMetrics: Record<string, number>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Performance alert levels
|
||||
*/
|
||||
export enum AlertLevel {
|
||||
INFO = "info",
|
||||
WARNING = "warning",
|
||||
CRITICAL = "critical",
|
||||
}
|
||||
|
||||
/**
|
||||
* Performance alert
|
||||
*/
|
||||
export interface PerformanceAlert {
|
||||
level: AlertLevel;
|
||||
metric: string;
|
||||
message: string;
|
||||
value: number;
|
||||
threshold: number;
|
||||
timestamp: number;
|
||||
recommendations: string[];
|
||||
}
|
||||
|
||||
/**
|
||||
* Performance bottleneck types
|
||||
*/
|
||||
export enum BottleneckType {
|
||||
MEMORY = "memory",
|
||||
CPU = "cpu",
|
||||
EVENT_LOOP = "event_loop",
|
||||
CACHE_MISS = "cache_miss",
|
||||
SLOW_QUERIES = "slow_queries",
|
||||
HIGH_LATENCY = "high_latency",
|
||||
}
|
||||
|
||||
/**
|
||||
* Bottleneck detection result
|
||||
*/
|
||||
export interface Bottleneck {
|
||||
type: BottleneckType;
|
||||
severity: AlertLevel;
|
||||
description: string;
|
||||
impact: number; // 0-100 scale
|
||||
recommendations: string[];
|
||||
metrics: Record<string, number>;
|
||||
}
|
||||
|
||||
/**
|
||||
* Performance thresholds configuration
|
||||
*/
|
||||
export interface PerformanceThresholds {
|
||||
memory: {
|
||||
heapUsedWarning: number; // MB
|
||||
heapUsedCritical: number; // MB
|
||||
rssWarning: number; // MB
|
||||
rssCritical: number; // MB
|
||||
};
|
||||
cpu: {
|
||||
usageWarning: number; // Percentage
|
||||
usageCritical: number; // Percentage
|
||||
};
|
||||
eventLoop: {
|
||||
delayWarning: number; // Milliseconds
|
||||
delayCritical: number; // Milliseconds
|
||||
utilizationWarning: number; // Percentage
|
||||
};
|
||||
cache: {
|
||||
hitRateWarning: number; // Percentage
|
||||
memoryUsageWarning: number; // MB
|
||||
};
|
||||
response: {
|
||||
averageTimeWarning: number; // Milliseconds
|
||||
errorRateWarning: number; // Percentage
|
||||
slowRequestThreshold: number; // Milliseconds
|
||||
};
|
||||
}
|
||||
|
||||
/**
|
||||
* Performance optimization recommendation
|
||||
*/
|
||||
export interface OptimizationRecommendation {
|
||||
priority: "high" | "medium" | "low";
|
||||
category: string;
|
||||
title: string;
|
||||
description: string;
|
||||
implementation: string;
|
||||
estimatedImpact: number; // 0-100 scale
|
||||
}
|
||||
|
||||
/**
|
||||
* Main performance monitor class
|
||||
*/
|
||||
export class PerformanceMonitor {
|
||||
private isMonitoring = false;
|
||||
private metricsHistory: PerformanceMetrics[] = [];
|
||||
private customMetrics = new Map<string, number>();
|
||||
private requestMetrics = {
|
||||
totalRequests: 0,
|
||||
totalResponseTime: 0,
|
||||
errors: 0,
|
||||
slowRequests: 0,
|
||||
};
|
||||
|
||||
private readonly maxHistorySize = 100;
|
||||
private monitoringInterval: NodeJS.Timeout | null = null;
|
||||
private perfObserver: PerformanceObserver | null = null;
|
||||
|
||||
private readonly defaultThresholds: PerformanceThresholds = {
|
||||
memory: {
|
||||
heapUsedWarning: 200, // 200 MB
|
||||
heapUsedCritical: 400, // 400 MB
|
||||
rssWarning: 300, // 300 MB
|
||||
rssCritical: 600, // 600 MB
|
||||
},
|
||||
cpu: {
|
||||
usageWarning: 70, // 70%
|
||||
usageCritical: 90, // 90%
|
||||
},
|
||||
eventLoop: {
|
||||
delayWarning: 10, // 10ms
|
||||
delayCritical: 50, // 50ms
|
||||
utilizationWarning: 80, // 80%
|
||||
},
|
||||
cache: {
|
||||
hitRateWarning: 50, // 50%
|
||||
memoryUsageWarning: 100, // 100 MB
|
||||
},
|
||||
response: {
|
||||
averageTimeWarning: 1000, // 1 second
|
||||
errorRateWarning: 5, // 5%
|
||||
slowRequestThreshold: 1000, // 1 second
|
||||
},
|
||||
};
|
||||
|
||||
private thresholds: PerformanceThresholds;
|
||||
|
||||
constructor(thresholdsOverride: Partial<PerformanceThresholds> = {}) {
|
||||
this.thresholds = { ...this.defaultThresholds, ...thresholdsOverride };
|
||||
}
|
||||
|
||||
/**
|
||||
* Start performance monitoring
|
||||
*/
|
||||
start(intervalMs = 30000): void {
|
||||
if (this.isMonitoring) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.isMonitoring = true;
|
||||
|
||||
// Set up performance observer for timing data
|
||||
this.setupPerformanceObserver();
|
||||
|
||||
// Start periodic metrics collection
|
||||
this.monitoringInterval = setInterval(() => {
|
||||
this.collectMetrics();
|
||||
}, intervalMs);
|
||||
|
||||
console.log(
|
||||
"[Performance Monitor] Started monitoring with interval:",
|
||||
intervalMs + "ms"
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Stop performance monitoring
|
||||
*/
|
||||
stop(): void {
|
||||
if (!this.isMonitoring) {
|
||||
return;
|
||||
}
|
||||
|
||||
this.isMonitoring = false;
|
||||
|
||||
if (this.monitoringInterval) {
|
||||
clearInterval(this.monitoringInterval);
|
||||
this.monitoringInterval = null;
|
||||
}
|
||||
|
||||
if (this.perfObserver) {
|
||||
this.perfObserver.disconnect();
|
||||
this.perfObserver = null;
|
||||
}
|
||||
|
||||
console.log("[Performance Monitor] Stopped monitoring");
|
||||
}
|
||||
|
||||
/**
|
||||
* Record a custom metric
|
||||
*/
|
||||
recordMetric(name: string, value: number): void {
|
||||
this.customMetrics.set(name, value);
|
||||
}
|
||||
|
||||
/**
|
||||
* Record request metrics
|
||||
*/
|
||||
recordRequest(responseTime: number, isError = false): void {
|
||||
this.requestMetrics.totalRequests++;
|
||||
this.requestMetrics.totalResponseTime += responseTime;
|
||||
|
||||
if (isError) {
|
||||
this.requestMetrics.errors++;
|
||||
}
|
||||
|
||||
if (responseTime > this.thresholds.response.slowRequestThreshold) {
|
||||
this.requestMetrics.slowRequests++;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get current performance metrics
|
||||
*/
|
||||
getCurrentMetrics(): PerformanceMetrics {
|
||||
const memoryUsage = process.memoryUsage();
|
||||
const cpuUsage = process.cpuUsage();
|
||||
|
||||
// Calculate event loop metrics
|
||||
const start = performance.now();
|
||||
setImmediate(() => {
|
||||
const eventLoopDelay = performance.now() - start;
|
||||
|
||||
// Event loop utilization (approximated)
|
||||
const eventLoopUtilization = Math.min(
|
||||
100,
|
||||
(eventLoopDelay / 16.67) * 100
|
||||
); // 16.67ms = 60fps
|
||||
});
|
||||
|
||||
// Get cache metrics
|
||||
const cacheReport = cacheManager.getPerformanceReport();
|
||||
|
||||
// Get deduplication metrics
|
||||
const deduplicationStats = deduplicationManager.getAllStats();
|
||||
const deduplicationHitRates = Object.values(deduplicationStats).map(
|
||||
(s) => s.hitRate
|
||||
);
|
||||
const averageDeduplicationHitRate =
|
||||
deduplicationHitRates.length > 0
|
||||
? deduplicationHitRates.reduce((sum, rate) => sum + rate, 0) /
|
||||
deduplicationHitRates.length
|
||||
: 0;
|
||||
|
||||
const totalDeduplicatedRequests = Object.values(deduplicationStats).reduce(
|
||||
(sum, stats) => sum + stats.deduplicatedRequests,
|
||||
0
|
||||
);
|
||||
|
||||
// Calculate request metrics
|
||||
const averageResponseTime =
|
||||
this.requestMetrics.totalRequests > 0
|
||||
? this.requestMetrics.totalResponseTime /
|
||||
this.requestMetrics.totalRequests
|
||||
: 0;
|
||||
|
||||
const errorRate =
|
||||
this.requestMetrics.totalRequests > 0
|
||||
? (this.requestMetrics.errors / this.requestMetrics.totalRequests) * 100
|
||||
: 0;
|
||||
|
||||
return {
|
||||
timestamp: Date.now(),
|
||||
memoryUsage: {
|
||||
rss: Math.round(memoryUsage.rss / 1024 / 1024), // Convert to MB
|
||||
heapUsed: Math.round(memoryUsage.heapUsed / 1024 / 1024),
|
||||
heapTotal: Math.round(memoryUsage.heapTotal / 1024 / 1024),
|
||||
external: Math.round(memoryUsage.external / 1024 / 1024),
|
||||
arrayBuffers: Math.round(memoryUsage.arrayBuffers / 1024 / 1024),
|
||||
},
|
||||
cpuUsage: {
|
||||
user: cpuUsage.user / 1000, // Convert to milliseconds
|
||||
system: cpuUsage.system / 1000,
|
||||
},
|
||||
eventLoop: {
|
||||
delay: 0, // Will be updated asynchronously
|
||||
utilization: 0, // Will be updated asynchronously
|
||||
},
|
||||
cacheMetrics: {
|
||||
totalCaches: cacheReport.totalCaches,
|
||||
totalMemoryUsage: Math.round(
|
||||
cacheReport.totalMemoryUsage / 1024 / 1024
|
||||
), // MB
|
||||
averageHitRate: cacheReport.averageHitRate * 100, // Percentage
|
||||
topPerformers: cacheReport.topPerformers.slice(0, 3),
|
||||
},
|
||||
deduplicationMetrics: {
|
||||
totalDeduplicators: Object.keys(deduplicationStats).length,
|
||||
averageHitRate: averageDeduplicationHitRate * 100, // Percentage
|
||||
totalDeduplicatedRequests,
|
||||
},
|
||||
requestMetrics: {
|
||||
totalRequests: this.requestMetrics.totalRequests,
|
||||
averageResponseTime,
|
||||
errorRate,
|
||||
slowRequests: this.requestMetrics.slowRequests,
|
||||
},
|
||||
customMetrics: Object.fromEntries(this.customMetrics),
|
||||
};
|
||||
}
|
||||

  /**
   * Detect performance bottlenecks
   */
  detectBottlenecks(metrics?: PerformanceMetrics): Bottleneck[] {
    const currentMetrics = metrics || this.getCurrentMetrics();
    const bottlenecks: Bottleneck[] = [];

    // Memory bottlenecks
    if (
      currentMetrics.memoryUsage.heapUsed >
      this.thresholds.memory.heapUsedCritical
    ) {
      bottlenecks.push({
        type: BottleneckType.MEMORY,
        severity: AlertLevel.CRITICAL,
        description: `Heap memory usage is critically high: ${currentMetrics.memoryUsage.heapUsed}MB`,
        impact: 90,
        recommendations: [
          "Investigate memory leaks in application code",
          "Implement object pooling for frequently created objects",
          "Reduce cache sizes or TTL values",
          "Consider increasing available memory or horizontal scaling",
        ],
        metrics: { heapUsed: currentMetrics.memoryUsage.heapUsed },
      });
    } else if (
      currentMetrics.memoryUsage.heapUsed >
      this.thresholds.memory.heapUsedWarning
    ) {
      bottlenecks.push({
        type: BottleneckType.MEMORY,
        severity: AlertLevel.WARNING,
        description: `Heap memory usage is high: ${currentMetrics.memoryUsage.heapUsed}MB`,
        impact: 60,
        recommendations: [
          "Monitor memory usage trends",
          "Review cache configurations for optimization opportunities",
          "Implement garbage collection optimization",
        ],
        metrics: { heapUsed: currentMetrics.memoryUsage.heapUsed },
      });
    }

    // Event loop bottlenecks
    if (
      currentMetrics.eventLoop.delay > this.thresholds.eventLoop.delayCritical
    ) {
      bottlenecks.push({
        type: BottleneckType.EVENT_LOOP,
        severity: AlertLevel.CRITICAL,
        description: `Event loop delay is critically high: ${currentMetrics.eventLoop.delay}ms`,
        impact: 95,
        recommendations: [
          "Identify and optimize CPU-intensive synchronous operations",
          "Move heavy computations to worker threads",
          "Implement request queuing and rate limiting",
          "Profile application to find blocking operations",
        ],
        metrics: { eventLoopDelay: currentMetrics.eventLoop.delay },
      });
    }

    // Cache performance bottlenecks
    if (
      currentMetrics.cacheMetrics.averageHitRate <
      this.thresholds.cache.hitRateWarning
    ) {
      bottlenecks.push({
        type: BottleneckType.CACHE_MISS,
        severity: AlertLevel.WARNING,
        description: `Cache hit rate is low: ${currentMetrics.cacheMetrics.averageHitRate.toFixed(1)}%`,
        impact: 40,
        recommendations: [
          "Review cache key strategies and TTL configurations",
          "Implement cache warming for frequently accessed data",
          "Analyze cache access patterns to optimize cache sizes",
          "Consider implementing cache hierarchies",
        ],
        metrics: { hitRate: currentMetrics.cacheMetrics.averageHitRate },
      });
    }

    // Response time bottlenecks
    if (
      currentMetrics.requestMetrics.averageResponseTime >
      this.thresholds.response.averageTimeWarning
    ) {
      bottlenecks.push({
        type: BottleneckType.HIGH_LATENCY,
        severity: AlertLevel.WARNING,
        description: `Average response time is high: ${currentMetrics.requestMetrics.averageResponseTime.toFixed(0)}ms`,
        impact: 70,
        recommendations: [
          "Implement request caching for expensive operations",
          "Optimize database queries and add missing indexes",
          "Enable response compression",
          "Consider implementing CDN for static assets",
        ],
        metrics: {
          averageResponseTime:
            currentMetrics.requestMetrics.averageResponseTime,
        },
      });
    }

    return bottlenecks;
  }
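
  /*
   * Usage sketch (illustrative only): severity-gated handling of detected
   * bottlenecks, using the same `AlertLevel` enum this file already uses.
   *
   *   for (const b of performanceMonitor.detectBottlenecks()) {
   *     if (b.severity === AlertLevel.CRITICAL) {
   *       console.error(b.description, b.recommendations);
   *     }
   *   }
   */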

  /**
   * Generate optimization recommendations
   */
  generateRecommendations(
    metrics?: PerformanceMetrics
  ): OptimizationRecommendation[] {
    const currentMetrics = metrics || this.getCurrentMetrics();
    const recommendations: OptimizationRecommendation[] = [];

    // Memory optimization recommendations
    if (currentMetrics.memoryUsage.heapUsed > 100) {
      // 100MB
      recommendations.push({
        priority: "high",
        category: "Memory",
        title: "Implement Memory Optimization",
        description:
          "High memory usage detected. Consider implementing memory optimization strategies.",
        implementation:
          "Review object lifecycle, implement object pooling, optimize cache configurations",
        estimatedImpact: 75,
      });
    }

    // Cache optimization recommendations
    if (currentMetrics.cacheMetrics.averageHitRate < 70) {
      recommendations.push({
        priority: "medium",
        category: "Caching",
        title: "Improve Cache Performance",
        description:
          "Cache hit rate is below optimal. Implement cache optimization strategies.",
        implementation:
          "Adjust TTL values, implement cache warming, optimize cache key strategies",
        estimatedImpact: 60,
      });
    }

    // Response time optimization
    if (currentMetrics.requestMetrics.averageResponseTime > 500) {
      recommendations.push({
        priority: "high",
        category: "Performance",
        title: "Reduce Response Times",
        description:
          "Average response time exceeds target. Implement performance optimizations.",
        implementation:
          "Add response caching, optimize database queries, implement request deduplication",
        estimatedImpact: 80,
      });
    }

    // Deduplication optimization
    if (currentMetrics.deduplicationMetrics.averageHitRate < 30) {
      recommendations.push({
        priority: "low",
        category: "Optimization",
        title: "Improve Request Deduplication",
        description:
          "Low deduplication hit rate suggests opportunities for optimization.",
        implementation:
          "Review deduplication key strategies, increase TTL for stable operations",
        estimatedImpact: 40,
      });
    }

    // Sort by priority: high first, low last
    return recommendations.sort((a, b) => {
      const priorityOrder = { high: 3, medium: 2, low: 1 };
      return priorityOrder[b.priority] - priorityOrder[a.priority];
    });
  }
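
  /*
   * Usage sketch (illustrative only): recommendations come back sorted
   * high -> medium -> low, so the first entry is the most urgent one.
   *
   *   const [top] = performanceMonitor.generateRecommendations();
   *   if (top) {
   *     console.log(`${top.title}: ${top.implementation}`);
   *   }
   */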

  /**
   * Get performance history
   */
  getHistory(limit?: number): PerformanceMetrics[] {
    return limit ? this.metricsHistory.slice(-limit) : [...this.metricsHistory];
  }

  /**
   * Get performance summary
   */
  getPerformanceSummary(): {
    currentMetrics: PerformanceMetrics;
    bottlenecks: Bottleneck[];
    recommendations: OptimizationRecommendation[];
    trends: {
      memoryTrend: "increasing" | "decreasing" | "stable";
      responseTrend: "improving" | "degrading" | "stable";
      cacheTrend: "improving" | "degrading" | "stable";
    };
  } {
    const currentMetrics = this.getCurrentMetrics();
    const bottlenecks = this.detectBottlenecks(currentMetrics);
    const recommendations = this.generateRecommendations(currentMetrics);

    // Calculate trends
    const trends = this.calculateTrends();

    return {
      currentMetrics,
      bottlenecks,
      recommendations,
      trends,
    };
  }
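
  /*
   * Usage sketch (illustrative only), e.g. backing a health-check endpoint:
   *
   *   const { bottlenecks, trends } = performanceMonitor.getPerformanceSummary();
   *   const healthy =
   *     bottlenecks.length === 0 && trends.memoryTrend !== "increasing";
   */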

  /**
   * Set up performance observer for timing data
   */
  private setupPerformanceObserver(): void {
    try {
      this.perfObserver = new PerformanceObserver((list) => {
        const entries = list.getEntries();
        entries.forEach((entry) => {
          if (entry.entryType === "measure") {
            this.recordMetric(`timing.${entry.name}`, entry.duration);
          }
        });
      });

      this.perfObserver.observe({ entryTypes: ["measure"] });
    } catch (error) {
      console.warn(
        "[Performance Monitor] Failed to set up performance observer:",
        error
      );
    }
  }
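
  /*
   * The observer above only records `measure` entries, so timings created
   * via the standard User Timing API flow into custom metrics automatically.
   * Illustrative sketch (`runJob` is a hypothetical async function):
   *
   *   performance.mark("job-start");
   *   await runJob();
   *   performance.mark("job-end");
   *   performance.measure("job", "job-start", "job-end"); // -> "timing.job"
   */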

  /**
   * Collect and store metrics
   */
  private collectMetrics(): void {
    try {
      const metrics = this.getCurrentMetrics();

      // Add to history
      this.metricsHistory.push(metrics);

      // Limit history size
      if (this.metricsHistory.length > this.maxHistorySize) {
        this.metricsHistory.shift();
      }

      // Check for bottlenecks and log warnings
      const bottlenecks = this.detectBottlenecks(metrics);
      bottlenecks.forEach((bottleneck) => {
        if (bottleneck.severity === AlertLevel.CRITICAL) {
          console.error(
            `[Performance Monitor] CRITICAL: ${bottleneck.description}`
          );
        } else if (bottleneck.severity === AlertLevel.WARNING) {
          console.warn(
            `[Performance Monitor] WARNING: ${bottleneck.description}`
          );
        }
      });
    } catch (error) {
      console.error("[Performance Monitor] Failed to collect metrics:", error);
    }
  }

  /**
   * Calculate performance trends
   */
  private calculateTrends(): {
    memoryTrend: "increasing" | "decreasing" | "stable";
    responseTrend: "improving" | "degrading" | "stable";
    cacheTrend: "improving" | "degrading" | "stable";
  } {
    if (this.metricsHistory.length < 5) {
      return {
        memoryTrend: "stable",
        responseTrend: "stable",
        cacheTrend: "stable",
      };
    }

    const recent = this.metricsHistory.slice(-5);
    const older = this.metricsHistory.slice(-10, -5);

    if (older.length === 0) {
      return {
        memoryTrend: "stable",
        responseTrend: "stable",
        cacheTrend: "stable",
      };
    }

    // Calculate averages
    const recentMemory =
      recent.reduce((sum, m) => sum + m.memoryUsage.heapUsed, 0) /
      recent.length;
    const olderMemory =
      older.reduce((sum, m) => sum + m.memoryUsage.heapUsed, 0) / older.length;

    const recentResponse =
      recent.reduce((sum, m) => sum + m.requestMetrics.averageResponseTime, 0) /
      recent.length;
    const olderResponse =
      older.reduce((sum, m) => sum + m.requestMetrics.averageResponseTime, 0) /
      older.length;

    const recentCache =
      recent.reduce((sum, m) => sum + m.cacheMetrics.averageHitRate, 0) /
      recent.length;
    const olderCache =
      older.reduce((sum, m) => sum + m.cacheMetrics.averageHitRate, 0) /
      older.length;

    // A move of more than 10% in either direction counts as a trend;
    // anything inside that band is treated as stable.
    return {
      memoryTrend:
        recentMemory > olderMemory * 1.1
          ? "increasing"
          : recentMemory < olderMemory * 0.9
            ? "decreasing"
            : "stable",
      responseTrend:
        recentResponse < olderResponse * 0.9
          ? "improving"
          : recentResponse > olderResponse * 1.1
            ? "degrading"
            : "stable",
      cacheTrend:
        recentCache > olderCache * 1.1
          ? "improving"
          : recentCache < olderCache * 0.9
            ? "degrading"
            : "stable",
    };
  }
}

/**
 * Global performance monitor instance
 */
export const performanceMonitor = new PerformanceMonitor();

/**
 * Performance monitoring utilities
 */
export class PerformanceUtils {
  /**
   * Measure execution time of an async function
   */
  static async measureAsync<T>(
    name: string,
    fn: () => Promise<T>
  ): Promise<{ result: T; duration: number }> {
    const start = performance.now();
    const result = await fn();
    const duration = performance.now() - start;

    performanceMonitor.recordMetric(`execution.${name}`, duration);

    return { result, duration };
  }
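
  /*
   * Usage sketch (illustrative only; `fetchDashboard` is a hypothetical
   * async function):
   *
   *   const { result, duration } = await PerformanceUtils.measureAsync(
   *     "dashboard-load",
   *     () => fetchDashboard()
   *   );
   *   console.log(`loaded in ${duration.toFixed(1)}ms`);
   */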

  /**
   * Measure execution time of a synchronous function
   */
  static measure<T>(
    name: string,
    fn: () => T
  ): { result: T; duration: number } {
    const start = performance.now();
    const result = fn();
    const duration = performance.now() - start;

    performanceMonitor.recordMetric(`execution.${name}`, duration);

    return { result, duration };
  }

  /**
   * Create a performance timer
   */
  static createTimer(name: string) {
    const start = performance.now();

    return {
      end: () => {
        const duration = performance.now() - start;
        performanceMonitor.recordMetric(`timer.${name}`, duration);
        return duration;
      },
    };
  }
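
  /*
   * Usage sketch (illustrative only): a timer is handy when the start and
   * end points do not share a lexical scope, unlike measure()/measureAsync().
   *
   *   const timer = PerformanceUtils.createTimer("import-batch");
   *   // ... work happens elsewhere ...
   *   const elapsed = timer.end(); // records "timer.import-batch"
   */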

  /**
   * Decorator for measuring method performance
   */
  static measured(name?: string) {
    return (
      target: any,
      propertyKey: string,
      descriptor: PropertyDescriptor
    ) => {
      const originalMethod = descriptor.value;
      const metricName = name || `${target.constructor.name}.${propertyKey}`;

      if (typeof originalMethod !== "function") {
        throw new Error("Measured decorator can only be applied to methods");
      }

      descriptor.value = async function (...args: any[]) {
        const { result } = await PerformanceUtils.measureAsync(
          metricName,
          () => originalMethod.apply(this, args)
        );
        return result;
      };

      return descriptor;
    };
  }
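
  /*
   * Usage sketch (illustrative only; `ReportService` is hypothetical). The
   * decorator replaces the method with an async wrapper, so it is meant for
   * methods that are already async:
   *
   *   class ReportService {
   *     @PerformanceUtils.measured("reports.generate")
   *     async generate(): Promise<string> {
   *       // ...
   *     }
   *   }
   */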
}
675
lib/performance/optimizer.ts
Normal file
@ -0,0 +1,675 @@

/**
 * Performance Optimizer Service
 *
 * Analyzes performance data and automatically applies optimizations
 * to improve system performance based on real-time metrics.
 */

import {
  performanceMonitor,
  type PerformanceMetrics,
  type Bottleneck,
} from "./monitor";
import { cacheManager, type CacheStats } from "./cache";
import { deduplicationManager } from "./deduplication";
import { TIME } from "../constants";

/**
 * Optimization action types
 */
export enum OptimizationAction {
  ADJUST_CACHE_TTL = "adjust_cache_ttl",
  INCREASE_CACHE_SIZE = "increase_cache_size",
  DECREASE_CACHE_SIZE = "decrease_cache_size",
  CLEAR_INEFFICIENT_CACHE = "clear_inefficient_cache",
  OPTIMIZE_DEDUPLICATION = "optimize_deduplication",
  REDUCE_MEMORY_USAGE = "reduce_memory_usage",
  TRIGGER_GARBAGE_COLLECTION = "trigger_garbage_collection",
  SCALE_HORIZONTALLY = "scale_horizontally",
  ALERT_OPERATORS = "alert_operators",
}

/**
 * Optimization result
 */
export interface OptimizationResult {
  action: OptimizationAction;
  target: string;
  applied: boolean;
  result: {
    success: boolean;
    message: string;
    metrics?: {
      before: any;
      after: any;
      improvement: number; // Percentage
    };
  };
  timestamp: Date;
}

/**
 * Auto-optimization configuration
 */
export interface AutoOptimizationConfig {
  enabled: boolean;
  interval: number; // Check interval in milliseconds
  thresholds: {
    memoryUsage: number; // MB
    cacheHitRate: number; // Percentage
    responseTime: number; // Milliseconds
    errorRate: number; // Percentage
  };
  actions: {
    autoCache: boolean;
    autoGarbageCollection: boolean;
    autoScaling: boolean;
    autoAlerting: boolean;
  };
}
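
/*
 * Example configuration (illustrative only): opt in with conservative
 * actions and slightly looser thresholds than the defaults below.
 *
 *   const config: AutoOptimizationConfig = {
 *     enabled: true,
 *     interval: 5 * TIME.MINUTE,
 *     thresholds: {
 *       memoryUsage: 400,
 *       cacheHitRate: 30,
 *       responseTime: 1500,
 *       errorRate: 10,
 *     },
 *     actions: {
 *       autoCache: true,
 *       autoGarbageCollection: false,
 *       autoScaling: false,
 *       autoAlerting: true,
 *     },
 *   };
 */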

/**
 * Performance Optimizer Service
 */
export class PerformanceOptimizer {
  private optimizationHistory: OptimizationResult[] = [];
  private autoOptimizationInterval: NodeJS.Timeout | null = null;
  private isOptimizing = false;

  private readonly defaultConfig: AutoOptimizationConfig = {
    enabled: false, // Manual activation required
    interval: 2 * TIME.MINUTE, // Check every 2 minutes
    thresholds: {
      memoryUsage: 300, // 300MB
      cacheHitRate: 40, // 40%
      responseTime: 1000, // 1 second
      errorRate: 5, // 5%
    },
    actions: {
      autoCache: true,
      autoGarbageCollection: false, // Dangerous in production
      autoScaling: false, // Requires infrastructure integration
      autoAlerting: true,
    },
  };

  constructor(private config: Partial<AutoOptimizationConfig> = {}) {
    this.config = { ...this.defaultConfig, ...config };
  }

  /**
   * Start automatic optimization
   */
  startAutoOptimization(): void {
    if (this.autoOptimizationInterval || !this.config.enabled) {
      return;
    }

    console.log("[Performance Optimizer] Starting auto-optimization");

    this.autoOptimizationInterval = setInterval(async () => {
      try {
        await this.performOptimizationCycle();
      } catch (error) {
        console.error(
          "[Performance Optimizer] Auto-optimization failed:",
          error
        );
      }
    }, this.config.interval);
  }

  /**
   * Stop automatic optimization
   */
  stopAutoOptimization(): void {
    if (this.autoOptimizationInterval) {
      clearInterval(this.autoOptimizationInterval);
      this.autoOptimizationInterval = null;
      console.log("[Performance Optimizer] Stopped auto-optimization");
    }
  }
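
  /*
   * Usage sketch (illustrative only): auto-optimization is opt-in, so
   * `enabled: true` must be passed at construction for start to take effect.
   *
   *   const optimizer = new PerformanceOptimizer({ enabled: true });
   *   optimizer.startAutoOptimization();
   *   // ... on shutdown:
   *   optimizer.stopAutoOptimization();
   */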

  /**
   * Perform a single optimization cycle
   */
  async performOptimizationCycle(): Promise<OptimizationResult[]> {
    if (this.isOptimizing) {
      return [];
    }

    this.isOptimizing = true;
    const results: OptimizationResult[] = [];

    try {
      console.log("[Performance Optimizer] Starting optimization cycle");

      // Get current performance metrics
      const metrics = performanceMonitor.getCurrentMetrics();
      const bottlenecks = performanceMonitor.detectBottlenecks(metrics);

      // Analyze and apply optimizations
      const optimizations = await this.analyzeAndOptimize(metrics, bottlenecks);
      results.push(...optimizations);

      // Store results in history
      this.optimizationHistory.push(...results);

      // Limit history size
      if (this.optimizationHistory.length > 100) {
        this.optimizationHistory = this.optimizationHistory.slice(-100);
      }

      console.log(
        `[Performance Optimizer] Cycle complete: ${results.length} optimizations applied`
      );
    } finally {
      this.isOptimizing = false;
    }

    return results;
  }
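
  /*
   * Usage sketch (illustrative only): a cycle can also be driven manually,
   * e.g. from an admin endpoint. Re-entrant calls return [] while a cycle
   * is already in flight.
   *
   *   const applied = await performanceOptimizer.performOptimizationCycle();
   *   console.log(applied.filter((r) => r.result.success).length, "succeeded");
   */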

  /**
   * Analyze metrics and apply optimizations
   */
  private async analyzeAndOptimize(
    metrics: PerformanceMetrics,
    bottlenecks: Bottleneck[]
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    // Memory optimization
    if (metrics.memoryUsage.heapUsed > this.config.thresholds!.memoryUsage!) {
      results.push(...(await this.optimizeMemoryUsage(metrics)));
    }

    // Cache optimization
    if (
      metrics.cacheMetrics.averageHitRate < this.config.thresholds!.cacheHitRate!
    ) {
      results.push(...(await this.optimizeCaching(metrics)));
    }

    // Response time optimization
    if (
      metrics.requestMetrics.averageResponseTime >
      this.config.thresholds!.responseTime!
    ) {
      results.push(...(await this.optimizeResponseTime(metrics)));
    }

    // Handle critical bottlenecks
    const criticalBottlenecks = bottlenecks.filter(
      (b) => b.severity === "critical"
    );
    if (criticalBottlenecks.length > 0) {
      results.push(
        ...(await this.handleCriticalBottlenecks(criticalBottlenecks))
      );
    }

    return results;
  }

  /**
   * Optimize memory usage
   */
  private async optimizeMemoryUsage(
    metrics: PerformanceMetrics
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    // Clear inefficient caches (low hit rate and more than 10MB resident)
    const cacheStats = cacheManager.getAllStats();
    for (const [cacheName, stats] of Object.entries(cacheStats)) {
      if (stats.hitRate < 0.2 && stats.memoryUsage > 10 * 1024 * 1024) {
        const result = await this.clearCache(cacheName, stats);
        results.push(result);
      }
    }

    // Trigger garbage collection if enabled and memory is very high
    if (
      this.config.actions!.autoGarbageCollection &&
      metrics.memoryUsage.heapUsed > 500 // 500MB
    ) {
      const result = await this.triggerGarbageCollection(metrics);
      results.push(result);
    }

    return results;
  }

  /**
   * Optimize caching performance
   */
  private async optimizeCaching(
    _metrics: PerformanceMetrics
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    if (!this.config.actions!.autoCache) {
      return results;
    }

    const cacheStats = cacheManager.getAllStats();

    for (const [cacheName, stats] of Object.entries(cacheStats)) {
      if (stats.hitRate > 0.8 && stats.size < stats.maxSize * 0.7) {
        // Increase TTL for high-hit-rate caches
        const result = await this.adjustCacheTTL(cacheName, stats, "increase");
        results.push(result);
      } else if (stats.hitRate < 0.3) {
        // Decrease TTL for low-hit-rate caches
        const result = await this.adjustCacheTTL(cacheName, stats, "decrease");
        results.push(result);
      } else if (stats.size >= stats.maxSize * 0.95 && stats.hitRate > 0.6) {
        // Increase cache size if constantly at max
        const result = await this.adjustCacheSize(cacheName, stats, "increase");
        results.push(result);
      }
    }

    return results;
  }

  /**
   * Optimize response times
   */
  private async optimizeResponseTime(
    _metrics: PerformanceMetrics
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    // Optimize deduplication settings
    const deduplicationStats = deduplicationManager.getAllStats();
    for (const [name, stats] of Object.entries(deduplicationStats)) {
      if (stats.hitRate < 0.3) {
        const result = await this.optimizeDeduplication(name, stats);
        results.push(result);
      }
    }

    return results;
  }

  /**
   * Handle critical bottlenecks
   */
  private async handleCriticalBottlenecks(
    bottlenecks: Bottleneck[]
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    for (const bottleneck of bottlenecks) {
      switch (bottleneck.type) {
        case "memory":
          results.push(...(await this.handleMemoryBottleneck(bottleneck)));
          break;
        case "event_loop":
          results.push(...(await this.handleEventLoopBottleneck(bottleneck)));
          break;
        case "cache_miss":
          results.push(...(await this.handleCacheBottleneck(bottleneck)));
          break;
        default:
          // Alert operators for unknown bottlenecks
          if (this.config.actions!.autoAlerting) {
            const result = await this.alertOperators(bottleneck);
            results.push(result);
          }
      }
    }

    return results;
  }

  /**
   * Clear inefficient cache
   */
  private async clearCache(
    cacheName: string,
    stats: CacheStats
  ): Promise<OptimizationResult> {
    const beforeStats = { ...stats };

    try {
      const success = cacheManager.removeCache(cacheName);

      return {
        action: OptimizationAction.CLEAR_INEFFICIENT_CACHE,
        target: cacheName,
        applied: true,
        result: {
          success,
          message: success
            ? `Cleared inefficient cache '${cacheName}' (hit rate: ${(stats.hitRate * 100).toFixed(1)}%)`
            : `Failed to clear cache '${cacheName}'`,
          metrics: {
            before: beforeStats,
            after: { hitRate: 0, memoryUsage: 0, size: 0 },
            improvement: success ? 100 : 0,
          },
        },
        timestamp: new Date(),
      };
    } catch (error) {
      return {
        action: OptimizationAction.CLEAR_INEFFICIENT_CACHE,
        target: cacheName,
        applied: false,
        result: {
          success: false,
          message: `Error clearing cache '${cacheName}': ${error}`,
        },
        timestamp: new Date(),
      };
    }
  }

  /**
   * Trigger garbage collection
   */
  private async triggerGarbageCollection(
    metrics: PerformanceMetrics
  ): Promise<OptimizationResult> {
    const beforeMemory = metrics.memoryUsage.heapUsed;

    try {
      // Force garbage collection if available
      if (global.gc) {
        global.gc();

        // Wait a bit and measure again
        await new Promise((resolve) => setTimeout(resolve, 1000));
        const afterMetrics = performanceMonitor.getCurrentMetrics();
        const afterMemory = afterMetrics.memoryUsage.heapUsed;
        const improvement = ((beforeMemory - afterMemory) / beforeMemory) * 100;

        return {
          action: OptimizationAction.TRIGGER_GARBAGE_COLLECTION,
          target: "system",
          applied: true,
          result: {
            success: true,
            message: `Garbage collection freed ${(beforeMemory - afterMemory).toFixed(1)}MB`,
            metrics: {
              before: { heapUsed: beforeMemory },
              after: { heapUsed: afterMemory },
              improvement: Math.max(0, improvement),
            },
          },
          timestamp: new Date(),
        };
      } else {
        return {
          action: OptimizationAction.TRIGGER_GARBAGE_COLLECTION,
          target: "system",
          applied: false,
          result: {
            success: false,
            message: "Garbage collection not available (run with --expose-gc)",
          },
          timestamp: new Date(),
        };
      }
    } catch (error) {
      return {
        action: OptimizationAction.TRIGGER_GARBAGE_COLLECTION,
        target: "system",
        applied: false,
        result: {
          success: false,
          message: `Garbage collection failed: ${error}`,
        },
        timestamp: new Date(),
      };
    }
  }
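
  /*
   * Note: `global.gc` is only defined when Node.js is started with the
   * `--expose-gc` flag, e.g. (entry point is illustrative):
   *
   *   node --expose-gc server.js
   */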

  /**
   * Adjust cache TTL
   */
  private async adjustCacheTTL(
    cacheName: string,
    stats: CacheStats,
    direction: "increase" | "decrease"
  ): Promise<OptimizationResult> {
    // Runtime TTL adjustment would require changes to the cache
    // implementation; for now, return a recommendation instead.
    const multiplier = direction === "increase" ? 1.5 : 0.7;
    const recommendedTTL = Math.round(5 * TIME.MINUTE * multiplier);
    const verb = direction === "increase" ? "increasing" : "decreasing";

    return {
      action: OptimizationAction.ADJUST_CACHE_TTL,
      target: cacheName,
      applied: false, // Would need implementation
      result: {
        success: false,
        message: `Recommend ${verb} TTL for '${cacheName}' to ${recommendedTTL}ms (current hit rate: ${(stats.hitRate * 100).toFixed(1)}%)`,
      },
      timestamp: new Date(),
    };
  }

  /**
   * Adjust cache size
   */
  private async adjustCacheSize(
    cacheName: string,
    stats: CacheStats,
    direction: "increase" | "decrease"
  ): Promise<OptimizationResult> {
    // Runtime size adjustment would require changes to the cache implementation
    const multiplier = direction === "increase" ? 1.3 : 0.8;
    const recommendedSize = Math.round(stats.maxSize * multiplier);
    const verb = direction === "increase" ? "increasing" : "decreasing";

    return {
      action:
        direction === "increase"
          ? OptimizationAction.INCREASE_CACHE_SIZE
          : OptimizationAction.DECREASE_CACHE_SIZE,
      target: cacheName,
      applied: false, // Would need implementation
      result: {
        success: false,
        message: `Recommend ${verb} size for '${cacheName}' to ${recommendedSize} (current: ${stats.size}/${stats.maxSize})`,
      },
      timestamp: new Date(),
    };
  }

  /**
   * Optimize deduplication settings
   */
  private async optimizeDeduplication(
    name: string,
    stats: any
  ): Promise<OptimizationResult> {
    return {
      action: OptimizationAction.OPTIMIZE_DEDUPLICATION,
      target: name,
      applied: false, // Would need implementation
      result: {
        success: false,
        message: `Recommend increasing TTL for '${name}' deduplicator (current hit rate: ${(stats.hitRate * 100).toFixed(1)}%)`,
      },
      timestamp: new Date(),
    };
  }

  /**
   * Handle memory bottleneck
   */
  private async handleMemoryBottleneck(
    _bottleneck: Bottleneck
  ): Promise<OptimizationResult[]> {
    const results: OptimizationResult[] = [];

    // Clear caches aggressively
    cacheManager.clearAll();
    results.push({
      action: OptimizationAction.REDUCE_MEMORY_USAGE,
      target: "all-caches",
      applied: true,
      result: {
        success: true,
        message: "Cleared all caches due to memory bottleneck",
      },
      timestamp: new Date(),
    });

    return results;
  }

  /**
   * Handle event loop bottleneck
   */
  private async handleEventLoopBottleneck(
    _bottleneck: Bottleneck
  ): Promise<OptimizationResult[]> {
    return [
      {
        action: OptimizationAction.ALERT_OPERATORS,
        target: "event-loop",
        applied: true,
        result: {
          success: true,
          message:
            "Event loop bottleneck detected - operator intervention required",
        },
        timestamp: new Date(),
      },
    ];
  }

  /**
   * Handle cache bottleneck
   */
  private async handleCacheBottleneck(
    _bottleneck: Bottleneck
  ): Promise<OptimizationResult[]> {
    // Could implement cache warming or size adjustments
    return [
      {
        action: OptimizationAction.INCREASE_CACHE_SIZE,
        target: "cache-system",
        applied: false,
        result: {
          success: false,
          message:
            "Cache performance bottleneck - manual optimization recommended",
        },
        timestamp: new Date(),
      },
    ];
  }

  /**
   * Alert operators
   */
  private async alertOperators(
    bottleneck: Bottleneck
  ): Promise<OptimizationResult> {
    // Would integrate with alerting system
    console.warn("[Performance Optimizer] ALERT:", bottleneck);

    return {
      action: OptimizationAction.ALERT_OPERATORS,
      target: `${bottleneck.type}-bottleneck`,
      applied: true,
      result: {
        success: true,
        message: `Alerted operators about ${bottleneck.type} bottleneck (impact: ${bottleneck.impact})`,
      },
      timestamp: new Date(),
    };
  }

  /**
   * Get optimization history
   */
  getOptimizationHistory(limit?: number): OptimizationResult[] {
    return limit
      ? this.optimizationHistory.slice(-limit)
      : [...this.optimizationHistory];
  }

  /**
   * Get optimization statistics
   */
  getOptimizationStats(): {
    totalOptimizations: number;
    successfulOptimizations: number;
    actionCounts: Record<OptimizationAction, number>;
    averageImprovementRate: number;
    recentOptimizations: OptimizationResult[];
  } {
    const successful = this.optimizationHistory.filter((r) => r.result.success);
    const actionCounts = {} as Record<OptimizationAction, number>;

    // Count actions
    this.optimizationHistory.forEach((result) => {
      actionCounts[result.action] = (actionCounts[result.action] || 0) + 1;
    });

    // Calculate average improvement; include zero-improvement entries so the
    // average is not biased upward
    const improvementRates = this.optimizationHistory
      .filter((r) => r.result.metrics?.improvement !== undefined)
      .map((r) => r.result.metrics!.improvement);

    const averageImprovementRate =
      improvementRates.length > 0
        ? improvementRates.reduce((sum, rate) => sum + rate, 0) /
          improvementRates.length
        : 0;

    return {
      totalOptimizations: this.optimizationHistory.length,
      successfulOptimizations: successful.length,
      actionCounts,
      averageImprovementRate,
      recentOptimizations: this.optimizationHistory.slice(-10),
    };
  }

  /**
   * Manual optimization trigger
   */
  async runManualOptimization(target?: {
    type: "memory" | "cache" | "deduplication" | "all";
    specific?: string;
  }): Promise<OptimizationResult[]> {
    const metrics = performanceMonitor.getCurrentMetrics();
    const bottlenecks = performanceMonitor.detectBottlenecks(metrics);

    if (!target || target.type === "all") {
      return this.analyzeAndOptimize(metrics, bottlenecks);
    }

    switch (target.type) {
      case "memory":
        return this.optimizeMemoryUsage(metrics);
      case "cache":
        return this.optimizeCaching(metrics);
      case "deduplication":
        return this.optimizeResponseTime(metrics);
      default:
        return [];
    }
  }
}

/**
 * Global performance optimizer instance
 */
export const performanceOptimizer = new PerformanceOptimizer();
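
/*
 * Usage sketch (illustrative only): targeted manual runs via the singleton.
 *
 *   const results = await performanceOptimizer.runManualOptimization({
 *     type: "cache",
 *   });
 *   for (const r of results) {
 *     console.log(`${r.action} -> ${r.result.message}`);
 *   }
 */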