3 Commits

Author SHA1 Message Date
fd55b30398 shit 2025-06-26 22:43:22 +02:00
8774a1f155 feat: Refactor sentiment handling and enhance processing logic for session data 2025-06-26 21:14:24 +02:00
653d70022b Broken shit 2025-06-26 21:00:19 +02:00
126 changed files with 15413 additions and 16923 deletions

View File

@@ -1 +0,0 @@
Use pnpm to manage this project, not npm!

View File

@@ -2,25 +2,12 @@
 # This file ensures NextAuth always has necessary environment variables in development
 # NextAuth.js configuration
-NEXTAUTH_URL="http://localhost:3000"
-NEXTAUTH_SECRET="this_is_a_fixed_secret_for_development_only"
-NODE_ENV="development"
+NEXTAUTH_URL=http://192.168.1.2:3000
+NEXTAUTH_SECRET=this_is_a_fixed_secret_for_development_only
+NODE_ENV=development
 # OpenAI API key for session processing
 # Add your API key here: OPENAI_API_KEY=sk-...
-OPENAI_API_KEY="your_openai_api_key_here"
+OPENAI_API_KEY=your_openai_api_key_here
 # Database connection - already configured in your prisma/schema.prisma
-# Scheduler Configuration
-SCHEDULER_ENABLED="false" # Enable/disable all schedulers (false for dev, true for production)
-CSV_IMPORT_INTERVAL="*/15 * * * *" # Cron expression for CSV imports (every 15 minutes)
-IMPORT_PROCESSING_INTERVAL="*/5 * * * *" # Cron expression for processing imports to sessions (every 5 minutes)
-IMPORT_PROCESSING_BATCH_SIZE="50" # Number of imports to process at once
-SESSION_PROCESSING_INTERVAL="0 * * * *" # Cron expression for AI session processing (every hour)
-SESSION_PROCESSING_BATCH_SIZE="0" # 0 = unlimited sessions, >0 = specific limit
-SESSION_PROCESSING_CONCURRENCY="5" # How many sessions to process in parallel
-# Postgres Database Configuration
-DATABASE_URL_TEST="postgresql://"
-DATABASE_URL="postgresql://"

View File

@@ -1,29 +0,0 @@
# Copy this file to .env.local and configure as needed
# NextAuth.js configuration
NEXTAUTH_URL="http://localhost:3000"
NEXTAUTH_SECRET="your_secret_key_here"
NODE_ENV="development"
# OpenAI API key for session processing
OPENAI_API_KEY="your_openai_api_key_here"
# Scheduler Configuration
SCHEDULER_ENABLED="true" # Set to false to disable all schedulers during development
CSV_IMPORT_INTERVAL="*/15 * * * *" # Every 15 minutes (cron format)
IMPORT_PROCESSING_INTERVAL="*/5 * * * *" # Every 5 minutes (cron format) - converts imports to sessions
IMPORT_PROCESSING_BATCH_SIZE="50" # Number of imports to process at once
SESSION_PROCESSING_INTERVAL="0 * * * *" # Every hour (cron format) - AI processing
SESSION_PROCESSING_BATCH_SIZE="0" # 0 = process all sessions, >0 = limit number
SESSION_PROCESSING_CONCURRENCY="5" # Number of sessions to process in parallel
# Postgres Database Configuration
DATABASE_URL_TEST="postgresql://"
DATABASE_URL="postgresql://"
# Example configurations:
# - For development (no schedulers): SCHEDULER_ENABLED=false
# - For testing (every 5 minutes): CSV_IMPORT_INTERVAL=*/5 * * * *
# - For faster import processing: IMPORT_PROCESSING_INTERVAL=*/2 * * * *
# - For limited processing: SESSION_PROCESSING_BATCH_SIZE=10
# - For high concurrency: SESSION_PROCESSING_CONCURRENCY=10
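The scheduler variables documented above feed the node-cron jobs listed in the README's tech stack. A minimal sketch of how a scheduler entry point might consume them — `runCsvImport` and `processImports` are illustrative placeholders, not functions from this repository:

```typescript
import cron from "node-cron";

// Illustrative stand-ins for the project's real import/processing functions.
async function runCsvImport(): Promise<void> {
  /* fetch the CSV and queue SessionImport rows */
}
async function processImports(batchSize: number): Promise<void> {
  /* turn queued imports into Session records */
}

// Read the documented env vars, falling back to the documented defaults.
const schedulerEnabled = process.env.SCHEDULER_ENABLED === "true";
const csvInterval = process.env.CSV_IMPORT_INTERVAL ?? "*/15 * * * *";
const importInterval = process.env.IMPORT_PROCESSING_INTERVAL ?? "*/5 * * * *";
const importBatchSize = Number(process.env.IMPORT_PROCESSING_BATCH_SIZE ?? "50");

if (schedulerEnabled) {
  cron.schedule(csvInterval, () => void runCsvImport());
  cron.schedule(importInterval, () => void processImports(importBatchSize));
}
```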

20
.gemini/settings.json Normal file
View File

@@ -0,0 +1,20 @@
{
"mcpServers": {
"sqlite": {
"command": "uvx",
"args": [
"mcp-server-sqlite",
"--db-path",
"./prisma/dev.db"
]
},
"filesystem": {
"command": "npx",
"args": [
"-y",
"@modelcontextprotocol/server-filesystem",
"D:\\Notso\\Product\\Vibe-coding\\livedash-node"
]
}
}
}

4
.gitignore vendored
View File

@@ -261,7 +261,3 @@ Thumbs.db
 /playwright-report/
 /blob-report/
 /playwright/.cache/
-# OpenAI API request samples
-sample-openai-request.json
-admin-user.txt

47
GEMINI.md Normal file
View File

@@ -0,0 +1,47 @@
# Project Overview
This project is a Next.js application with a Node.js backend, designed to provide a live dashboard for data visualization and session management.
## Setup
To set up the project, follow these steps:
1. **Install Dependencies:**
```bash
npm install
```
2. **Environment Variables:**
Create a `.env` file based on `.env.example` and fill in the necessary environment variables.
3. **Database Setup:**
Run database migrations:
```bash
npx prisma migrate dev
```
Seed the database (optional):
```bash
npx prisma db seed
```
4. **Run Development Server:**
```bash
npm run dev
```
## Common Commands
- **Run Tests:**
```bash
npm test
```
- **Run Linter:**
```bash
npm run lint
```
- **Build Project:**
```bash
npm run build
```

106
README.md
View File

@@ -2,65 +2,65 @@
 A real-time analytics dashboard for monitoring user sessions and interactions with interactive data visualizations and detailed metrics.
-![Next.js](https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22next%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=nextdotjs&label=Nextjs&color=%23000000)
-![React](https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22react%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=react&label=React&color=%2361DAFB)
-![TypeScript](https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22typescript%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=typescript&label=TypeScript&color=%233178C6)
-![Prisma](https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22prisma%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=prisma&label=Prisma&color=%232D3748)
-![TailwindCSS](https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22tailwindcss%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=tailwindcss&label=TailwindCSS&color=%2306B6D4)
+![Next.js](<https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22next%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=nextdotjs&label=Nextjs&color=%23000000>)
+![React](<https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22react%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=react&label=React&color=%2361DAFB>)
+![TypeScript](<https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22typescript%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=typescript&label=TypeScript&color=%233178C6>)
+![Prisma](<https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22prisma%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=prisma&label=Prisma&color=%232D3748>)
+![TailwindCSS](<https://img.shields.io/badge/dynamic/regex?url=https%3A%2F%2Fraw.githubusercontent.com%2Fkjanat%2Flivedash-node%2Fmaster%2Fpackage.json&search=%22tailwindcss%22%5Cs*%3A%5Cs*%22%5C%5E(%3F%3Cversion%3E%5Cd%2B%5C.%5Cd*).*%22&replace=%24%3Cversion%3E&logo=tailwindcss&label=TailwindCSS&color=%2306B6D4>)
 ## Features
 - **Real-time Session Monitoring**: Track and analyze user sessions as they happen
 - **Interactive Visualizations**: Geographic maps, response time distributions, and more
 - **Advanced Analytics**: Detailed metrics and insights about user behavior
 - **User Management**: Secure authentication with role-based access control
 - **Customizable Dashboard**: Filter and sort data based on your specific needs
 - **Session Details**: In-depth analysis of individual user sessions
 ## Tech Stack
 - **Frontend**: React 19, Next.js 15, TailwindCSS 4
 - **Backend**: Next.js API Routes, Node.js
 - **Database**: Prisma ORM with SQLite (default), compatible with PostgreSQL
 - **Authentication**: NextAuth.js
 - **Visualization**: Chart.js, D3.js, React Leaflet
 - **Data Processing**: Node-cron for scheduled tasks
 ## Getting Started
 ### Prerequisites
 - Node.js (LTS version recommended)
 - npm or yarn
 ### Installation
 1. Clone this repository:
 ```bash
 git clone https://github.com/kjanat/livedash-node.git
 cd livedash-node
 ```
 2. Install dependencies:
 ```bash
 npm install
 ```
 3. Set up the database:
 ```bash
 npm run prisma:generate
 npm run prisma:migrate
 npm run prisma:seed
 ```
 4. Start the development server:
 ```bash
 npm run dev
 ```
 5. Open your browser and navigate to <http://localhost:3000>
@@ -76,22 +76,22 @@ NEXTAUTH_SECRET=your-secret-here
 ## Project Structure
 - `app/`: Next.js App Router components and pages
 - `components/`: Reusable React components
 - `lib/`: Utility functions and shared code
 - `pages/`: API routes and server-side code
 - `prisma/`: Database schema and migrations
 - `public/`: Static assets
 - `docs/`: Project documentation
 ## Available Scripts
 - `npm run dev`: Start the development server
 - `npm run build`: Build the application for production
 - `npm run start`: Run the production build
 - `npm run lint`: Run ESLint
 - `npm run format`: Format code with Prettier
 - `npm run prisma:studio`: Open Prisma Studio to view database
 ## Contributing
@@ -107,9 +107,9 @@ This project is not licensed for commercial use without explicit permission. Fre
 ## Acknowledgments
 - [Next.js](https://nextjs.org/)
 - [Prisma](https://prisma.io/)
 - [TailwindCSS](https://tailwindcss.com/)
 - [Chart.js](https://www.chartjs.org/)
 - [D3.js](https://d3js.org/)
 - [React Leaflet](https://react-leaflet.js.org/)

78
TODO.md Normal file
View File

@@ -0,0 +1,78 @@
# TODO.md
# Refactor!!!
> Based on my analysis of the codebase, here is a plan with recommendations for improving the project. The focus is on enhancing standardization, abstraction, user experience, and visual
> design.
## High-Level Recommendations
The project has a solid foundation, but it could be significantly improved by focusing on three key areas:
1. Adopt a UI Component Library: While Tailwind CSS is excellent for styling, using a component library like ShadCN/UI or Headless UI would provide pre-built, accessible, and visually
consistent components, saving development time and improving the user experience.
2. Refactor for Next.js App Router: The project currently uses a mix of the pages and app directories. Migrating fully to the App Router would simplify the project structure, improve
performance, and align with the latest Next.js features.
3. Enhance User Experience: Implementing consistent loading and error states, improving responsiveness, and providing better user feedback would make the application more robust and
user-friendly.
## Detailed Improvement Plan
Here is a phased plan to implement these recommendations:
### Phase 1: Foundational Improvements (Standardization & Abstraction)
This phase focuses on cleaning up the codebase, standardizing the project structure, and improving the abstraction of core functionalities.
1. Standardize Project Structure:
- [x] Unify Server File: Consolidated server.js, server.mjs, and server.ts into a single server.ts file to remove redundancy. ✅
- [x] Migrate to App Router: All API routes moved from `pages/api` to `app/api`. ✅
- [x] Standardize Naming Conventions: All files and components already follow a consistent naming convention (e.g., PascalCase for components, kebab-case for files). ✅
2. Introduce a UI Component Library:
- Integrate ShadCN/UI: Add ShadCN/UI to the project to leverage its extensive library of accessible and customizable components.
- Replace Custom Components: Gradually replace custom-built components in the components/ directory with their ShadCN/UI equivalents. This will improve visual consistency and reduce
maintenance overhead.
3. Refactor Core Logic:
- Centralize Data Fetching: Create a dedicated module (e.g., lib/data-service.ts) to handle all data fetching logic, abstracting away the details of using Prisma and external APIs (sketched after this list).
- Isolate Business Logic: Ensure that business logic (e.g., session processing, metric calculation) is separated from the API routes and UI components.
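
A minimal sketch of the proposed `lib/data-service.ts` abstraction. It assumes the existing `prisma` client export already used by the API routes; the function name and signature are suggestions, not existing code:

```typescript
import { prisma } from "./prisma";

// Centralized read helper: session queries go through here instead of being
// written inline in API routes and UI components.
export async function getSessionsForCompany(
  companyId: string,
  range?: { start: Date; end: Date }
) {
  return prisma.session.findMany({
    where: {
      companyId,
      ...(range ? { startTime: { gte: range.start, lte: range.end } } : {}),
    },
    include: { messages: true },
    orderBy: { startTime: "desc" },
  });
}
```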
### Phase 2: UX and Visual Enhancements
This phase focuses on improving the user-facing aspects of the application.
1. Implement Comprehensive Loading and Error States:
- Skeleton Loaders: Use skeleton loaders for dashboard components to provide a better loading experience.
- Global Error Handling: Implement a global error handling strategy to catch and display user-friendly error messages for API failures or other unexpected issues (see the error-boundary sketch after this list).
2. Redesign the Dashboard:
- Improve Information Hierarchy: Reorganize the dashboard to present the most important information first.
- Enhance Visual Appeal: Use the new component library to create a more modern and visually appealing design with a consistent color palette and typography.
- Improve Chart Interactivity: Add features like tooltips, zooming, and filtering to the charts to make them more interactive and informative.
3. Ensure Full Responsiveness:
- Mobile-First Approach: Review and update all pages and components to ensure they are fully responsive and usable on a wide range of devices.
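
For the global error handling item above, a sketch of a route-segment error boundary using the App Router convention (`app/error.tsx`); Next.js passes the thrown `error` and a `reset` callback to this component, and the styling here simply mirrors the project's existing Tailwind classes:

```tsx
"use client";

// Catches errors thrown by the enclosing route segment and shows a
// user-friendly message instead of a blank page.
export default function Error({
  error,
  reset,
}: {
  error: Error & { digest?: string };
  reset: () => void;
}) {
  return (
    <div className="text-center py-10">
      <h2 className="font-bold text-xl text-red-600 mb-2">Something went wrong</h2>
      <p className="text-sm text-gray-500 mb-4">{error.message}</p>
      <button onClick={() => reset()}>Try again</button>
    </div>
  );
}
```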
### Phase 3: Advanced Topics (Security, Performance, and Documentation)
This phase focuses on long-term improvements to the project's stability, performance, and maintainability.
1. Conduct a Security Review:
- Input Validation: Ensure that all user inputs are properly validated on both the client and server sides.
- Dependency Audit: Regularly audit dependencies for known vulnerabilities.
2. Optimize Performance:
- Code Splitting: Leverage Next.js's automatic code splitting to reduce initial load times.
- Caching: Implement caching strategies for frequently accessed data to reduce database load and improve API response times (see the cache sketch at the end of this file).
3. Expand Documentation:
- API Documentation: Create detailed documentation for all API endpoints.
- Component Library: Document the usage and props of all reusable components.
- Update `AGENTS.md`: Keep the AGENTS.md file up-to-date with any architectural changes.
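
For the caching item above, a minimal in-memory TTL cache sketch; purely illustrative — a production setup might prefer Redis or Next.js revalidation instead:

```typescript
type CacheEntry<T> = { value: T; expiresAt: number };

const cache = new Map<string, CacheEntry<unknown>>();

// Returns the cached value for `key` if it is still fresh; otherwise runs
// `load`, stores the result for `ttlMs` milliseconds, and returns it.
export async function cached<T>(
  key: string,
  ttlMs: number,
  load: () => Promise<T>
): Promise<T> {
  const hit = cache.get(key);
  if (hit && hit.expiresAt > Date.now()) {
    return hit.value as T;
  }
  const value = await load();
  cache.set(key, { value, expiresAt: Date.now() + ttlMs });
  return value;
}

// Example usage (hypothetical helper): await cached(`metrics:${companyId}`, 60_000, () => computeMetrics(companyId));
```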

View File

@@ -1,136 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { fetchAndParseCsv } from "../../../../lib/csvFetcher";
import { processQueuedImports } from "../../../../lib/importProcessor";
import { prisma } from "../../../../lib/prisma";
export async function POST(request: NextRequest) {
try {
const body = await request.json();
let { companyId } = body;
if (!companyId) {
// Try to get user from prisma based on session cookie
try {
const session = await prisma.session.findFirst({
orderBy: { createdAt: "desc" },
where: {
/* Add session check criteria here */
},
});
if (session) {
companyId = session.companyId;
}
} catch (error) {
// Log error for server-side debugging
const errorMessage =
error instanceof Error ? error.message : String(error);
// Use a server-side logging approach instead of console
process.stderr.write(`Error fetching session: ${errorMessage}\n`);
}
}
if (!companyId) {
return NextResponse.json(
{ error: "Company ID is required" },
{ status: 400 }
);
}
const company = await prisma.company.findUnique({ where: { id: companyId } });
if (!company) {
return NextResponse.json(
{ error: "Company not found" },
{ status: 404 }
);
}
const rawSessionData = await fetchAndParseCsv(
company.csvUrl,
company.csvUsername as string | undefined,
company.csvPassword as string | undefined
);
let importedCount = 0;
// Create SessionImport records for new data
for (const rawSession of rawSessionData) {
try {
// Use upsert to handle duplicates gracefully
await prisma.sessionImport.upsert({
where: {
companyId_externalSessionId: {
companyId: company.id,
externalSessionId: rawSession.externalSessionId,
},
},
update: {
// Update existing record with latest data
startTimeRaw: rawSession.startTimeRaw,
endTimeRaw: rawSession.endTimeRaw,
ipAddress: rawSession.ipAddress,
countryCode: rawSession.countryCode,
language: rawSession.language,
messagesSent: rawSession.messagesSent,
sentimentRaw: rawSession.sentimentRaw,
escalatedRaw: rawSession.escalatedRaw,
forwardedHrRaw: rawSession.forwardedHrRaw,
fullTranscriptUrl: rawSession.fullTranscriptUrl,
avgResponseTimeSeconds: rawSession.avgResponseTimeSeconds,
tokens: rawSession.tokens,
tokensEur: rawSession.tokensEur,
category: rawSession.category,
initialMessage: rawSession.initialMessage,
// Status tracking now handled by ProcessingStatusManager
},
create: {
companyId: company.id,
externalSessionId: rawSession.externalSessionId,
startTimeRaw: rawSession.startTimeRaw,
endTimeRaw: rawSession.endTimeRaw,
ipAddress: rawSession.ipAddress,
countryCode: rawSession.countryCode,
language: rawSession.language,
messagesSent: rawSession.messagesSent,
sentimentRaw: rawSession.sentimentRaw,
escalatedRaw: rawSession.escalatedRaw,
forwardedHrRaw: rawSession.forwardedHrRaw,
fullTranscriptUrl: rawSession.fullTranscriptUrl,
avgResponseTimeSeconds: rawSession.avgResponseTimeSeconds,
tokens: rawSession.tokens,
tokensEur: rawSession.tokensEur,
category: rawSession.category,
initialMessage: rawSession.initialMessage,
// Status tracking now handled by ProcessingStatusManager
},
});
importedCount++;
} catch (error) {
// Log individual session import errors but continue processing
process.stderr.write(
`Failed to import session ${rawSession.externalSessionId}: ${error}\n`
);
}
}
// Immediately process the queued imports to create Session records
console.log('[Refresh API] Processing queued imports...');
await processQueuedImports(100); // Process up to 100 imports immediately
// Count how many sessions were created
const sessionCount = await prisma.session.count({
where: { companyId: company.id }
});
return NextResponse.json({
ok: true,
imported: importedCount,
total: rawSessionData.length,
sessions: sessionCount,
message: `Successfully imported ${importedCount} records and processed them into sessions. Total sessions: ${sessionCount}`
});
} catch (e) {
const error = e instanceof Error ? e.message : "An unknown error occurred";
return NextResponse.json({ error }, { status: 500 });
}
}

View File

@@ -1,105 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth";
import { authOptions } from "../../auth/[...nextauth]/route";
import { prisma } from "../../../../lib/prisma";
import { processUnprocessedSessions } from "../../../../lib/processingScheduler";
import { ProcessingStatusManager } from "../../../../lib/processingStatusManager";
import { ProcessingStage } from "@prisma/client";
interface SessionUser {
email: string;
name?: string;
}
interface SessionData {
user: SessionUser;
}
export async function POST(request: NextRequest) {
const session = (await getServerSession(authOptions)) as SessionData | null;
if (!session?.user) {
return NextResponse.json({ error: "Not logged in" }, { status: 401 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email },
include: { company: true },
});
if (!user) {
return NextResponse.json({ error: "No user found" }, { status: 401 });
}
// Check if user has ADMIN role
if (user.role !== "ADMIN") {
return NextResponse.json(
{ error: "Admin access required" },
{ status: 403 }
);
}
try {
// Get optional parameters from request body
const body = await request.json();
const { batchSize, maxConcurrency } = body;
// Validate parameters
const validatedBatchSize = batchSize && batchSize > 0 ? parseInt(batchSize) : null;
const validatedMaxConcurrency = maxConcurrency && maxConcurrency > 0 ? parseInt(maxConcurrency) : 5;
// Check how many sessions need AI processing using the new status system
const sessionsNeedingAI = await ProcessingStatusManager.getSessionsNeedingProcessing(
ProcessingStage.AI_ANALYSIS,
1000 // Get count only
);
// Filter to sessions for this company
const companySessionsNeedingAI = sessionsNeedingAI.filter(
statusRecord => statusRecord.session.companyId === user.companyId
);
const unprocessedCount = companySessionsNeedingAI.length;
if (unprocessedCount === 0) {
return NextResponse.json({
success: true,
message: "No sessions requiring AI processing found",
unprocessedCount: 0,
processedCount: 0,
});
}
// Start processing (this will run asynchronously)
const startTime = Date.now();
// Note: We're calling the function but not awaiting it to avoid timeout
// The processing will continue in the background
processUnprocessedSessions(validatedBatchSize, validatedMaxConcurrency)
.then(() => {
console.log(`[Manual Trigger] Processing completed for company ${user.companyId}`);
})
.catch((error) => {
console.error(`[Manual Trigger] Processing failed for company ${user.companyId}:`, error);
});
return NextResponse.json({
success: true,
message: `Started processing ${unprocessedCount} unprocessed sessions`,
unprocessedCount,
batchSize: validatedBatchSize || unprocessedCount,
maxConcurrency: validatedMaxConcurrency,
startedAt: new Date().toISOString(),
});
} catch (error) {
console.error("[Manual Trigger] Error:", error);
return NextResponse.json(
{
error: "Failed to trigger processing",
details: error instanceof Error ? error.message : String(error),
},
{ status: 500 }
);
}
}

View File

@@ -1,106 +0,0 @@
import NextAuth, { NextAuthOptions } from "next-auth";
import CredentialsProvider from "next-auth/providers/credentials";
import { prisma } from "../../../../lib/prisma";
import bcrypt from "bcryptjs";
// Define the shape of the JWT token
declare module "next-auth/jwt" {
interface JWT {
companyId: string;
role: string;
}
}
// Define the shape of the session object
declare module "next-auth" {
interface Session {
user: {
id?: string;
name?: string;
email?: string;
image?: string;
companyId: string;
role: string;
};
}
interface User {
id: string;
email: string;
companyId: string;
role: string;
}
}
export const authOptions: NextAuthOptions = {
providers: [
CredentialsProvider({
name: "Credentials",
credentials: {
email: { label: "Email", type: "text" },
password: { label: "Password", type: "password" },
},
async authorize(credentials) {
if (!credentials?.email || !credentials?.password) {
return null;
}
const user = await prisma.user.findUnique({
where: { email: credentials.email },
});
if (!user) return null;
const valid = await bcrypt.compare(credentials.password, user.password);
if (!valid) return null;
return {
id: user.id,
email: user.email,
companyId: user.companyId,
role: user.role,
};
},
}),
],
session: {
strategy: "jwt",
maxAge: 30 * 24 * 60 * 60, // 30 days
},
cookies: {
sessionToken: {
name: `next-auth.session-token`,
options: {
httpOnly: true,
sameSite: "lax",
path: "/",
secure: process.env.NODE_ENV === "production",
},
},
},
callbacks: {
async jwt({ token, user }) {
if (user) {
token.companyId = user.companyId;
token.role = user.role;
}
return token;
},
async session({ session, token }) {
if (token && session.user) {
session.user.companyId = token.companyId;
session.user.role = token.role;
}
return session;
},
},
pages: {
signIn: "/login",
},
secret: process.env.NEXTAUTH_SECRET,
debug: process.env.NODE_ENV === "development",
};
const handler = NextAuth(authOptions);
export { handler as GET, handler as POST };

View File

@@ -1,51 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth";
import { prisma } from "../../../../lib/prisma";
import { authOptions } from "../../auth/[...nextauth]/route";
export async function GET(request: NextRequest) {
const session = await getServerSession(authOptions);
if (!session?.user) {
return NextResponse.json({ error: "Not logged in" }, { status: 401 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
// Get company data
const company = await prisma.company.findUnique({
where: { id: user.companyId },
});
return NextResponse.json({ company });
}
export async function POST(request: NextRequest) {
const session = await getServerSession(authOptions);
if (!session?.user) {
return NextResponse.json({ error: "Not logged in" }, { status: 401 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
const body = await request.json();
const { csvUrl } = body;
await prisma.company.update({
where: { id: user.companyId },
data: { csvUrl },
});
return NextResponse.json({ ok: true });
}

View File

@@ -1,138 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth";
import { prisma } from "../../../../lib/prisma";
import { sessionMetrics } from "../../../../lib/metrics";
import { authOptions } from "../../auth/[...nextauth]/route";
import { ChatSession } from "../../../../lib/types";
interface SessionUser {
email: string;
name?: string;
}
interface SessionData {
user: SessionUser;
}
export async function GET(request: NextRequest) {
const session = (await getServerSession(authOptions)) as SessionData | null;
if (!session?.user) {
return NextResponse.json({ error: "Not logged in" }, { status: 401 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email },
include: { company: true },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
// Get date range from query parameters
const { searchParams } = new URL(request.url);
const startDate = searchParams.get("startDate");
const endDate = searchParams.get("endDate");
// Build where clause with optional date filtering
const whereClause: any = {
companyId: user.companyId,
};
if (startDate && endDate) {
whereClause.startTime = {
gte: new Date(startDate),
lte: new Date(endDate + 'T23:59:59.999Z'), // Include full end date
};
}
const prismaSessions = await prisma.session.findMany({
where: whereClause,
include: {
messages: true, // Include messages for question extraction
},
});
// Convert Prisma sessions to ChatSession[] type for sessionMetrics
const chatSessions: ChatSession[] = prismaSessions.map((ps) => ({
id: ps.id, // Map Prisma's id to ChatSession.id
sessionId: ps.id, // Map Prisma's id to ChatSession.sessionId
companyId: ps.companyId,
startTime: new Date(ps.startTime), // Ensure startTime is a Date object
endTime: ps.endTime ? new Date(ps.endTime) : null, // Ensure endTime is a Date object or null
transcriptContent: "", // Session model doesn't have transcriptContent field
createdAt: new Date(ps.createdAt), // Map Prisma's createdAt
updatedAt: new Date(ps.createdAt), // Use createdAt for updatedAt as Session model doesn't have updatedAt
category: ps.category || undefined,
language: ps.language || undefined,
country: ps.country || undefined,
ipAddress: ps.ipAddress || undefined,
sentiment: ps.sentiment === null ? undefined : ps.sentiment,
messagesSent: ps.messagesSent === null ? undefined : ps.messagesSent, // Handle null messagesSent
avgResponseTime:
ps.avgResponseTime === null ? undefined : ps.avgResponseTime,
escalated: ps.escalated || false,
forwardedHr: ps.forwardedHr || false,
initialMsg: ps.initialMsg || undefined,
fullTranscriptUrl: ps.fullTranscriptUrl || undefined,
summary: ps.summary || undefined, // Include summary field
messages: ps.messages || [], // Include messages for question extraction
// userId is missing in Prisma Session model, assuming it's not strictly needed for metrics or can be null
userId: undefined, // Or some other default/mapping if available
}));
// Pass company config to metrics
const companyConfigForMetrics = {
sentimentAlert:
user.company.sentimentAlert === null
? undefined
: user.company.sentimentAlert,
};
const metrics = sessionMetrics(chatSessions, companyConfigForMetrics);
// Calculate date range from the FILTERED sessions to match what's actually displayed
let dateRange: { minDate: string; maxDate: string } | null = null;
let availableDataRange: { minDate: string; maxDate: string } | null = null;
// Get the full available range for reference
const allSessions = await prisma.session.findMany({
where: {
companyId: user.companyId,
},
select: {
startTime: true,
},
orderBy: {
startTime: 'asc',
},
});
if (allSessions.length > 0) {
availableDataRange = {
minDate: allSessions[0].startTime.toISOString().split('T')[0], // First session date
maxDate: allSessions[allSessions.length - 1].startTime.toISOString().split('T')[0] // Last session date
};
}
// Calculate date range from the filtered sessions (what's actually being displayed)
if (prismaSessions.length > 0) {
const sortedFilteredSessions = prismaSessions.sort((a, b) =>
new Date(a.startTime).getTime() - new Date(b.startTime).getTime()
);
dateRange = {
minDate: sortedFilteredSessions[0].startTime.toISOString().split('T')[0],
maxDate: sortedFilteredSessions[sortedFilteredSessions.length - 1].startTime.toISOString().split('T')[0]
};
} else if (availableDataRange) {
// If no filtered sessions but we have available data, use the available range
dateRange = availableDataRange;
}
return NextResponse.json({
metrics,
csvUrl: user.company.csvUrl,
company: user.company,
dateRange,
});
}

View File

@@ -1,71 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth/next";
import { authOptions } from "../../auth/[...nextauth]/route";
import { prisma } from "../../../../lib/prisma";
import { SessionFilterOptions } from "../../../../lib/types";
export async function GET(request: NextRequest) {
const authSession = await getServerSession(authOptions);
if (!authSession || !authSession.user?.companyId) {
return NextResponse.json({ error: "Unauthorized" }, { status: 401 });
}
const companyId = authSession.user.companyId;
try {
const categories = await prisma.session.findMany({
where: {
companyId,
category: {
not: null, // Ensure category is not null
},
},
distinct: ["category"],
select: {
category: true,
},
orderBy: {
category: "asc",
},
});
const languages = await prisma.session.findMany({
where: {
companyId,
language: {
not: null, // Ensure language is not null
},
},
distinct: ["language"],
select: {
language: true,
},
orderBy: {
language: "asc",
},
});
const distinctCategories = categories
.map((s) => s.category)
.filter(Boolean) as string[]; // Filter out any nulls and assert as string[]
const distinctLanguages = languages
.map((s) => s.language)
.filter(Boolean) as string[]; // Filter out any nulls and assert as string[]
return NextResponse.json({
categories: distinctCategories,
languages: distinctLanguages
});
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "An unknown error occurred";
return NextResponse.json(
{
error: "Failed to fetch filter options",
details: errorMessage,
},
{ status: 500 }
);
}
}

View File

@@ -1,85 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { prisma } from "../../../../../lib/prisma";
import { ChatSession } from "../../../../../lib/types";
export async function GET(
request: NextRequest,
{ params }: { params: { id: string } }
) {
const { id } = params;
if (!id) {
return NextResponse.json(
{ error: "Session ID is required" },
{ status: 400 }
);
}
try {
const prismaSession = await prisma.session.findUnique({
where: { id },
include: {
messages: {
orderBy: { order: "asc" },
},
},
});
if (!prismaSession) {
return NextResponse.json(
{ error: "Session not found" },
{ status: 404 }
);
}
// Map Prisma session object to ChatSession type
const session: ChatSession = {
// Spread prismaSession to include all its properties
...prismaSession,
// Override properties that need conversion or specific mapping
id: prismaSession.id, // ChatSession.id from Prisma.Session.id
sessionId: prismaSession.id, // ChatSession.sessionId from Prisma.Session.id
startTime: new Date(prismaSession.startTime),
endTime: prismaSession.endTime ? new Date(prismaSession.endTime) : null,
createdAt: new Date(prismaSession.createdAt),
// Prisma.Session does not have an `updatedAt` field. We'll use `createdAt` as a fallback.
// Or, if your business logic implies an update timestamp elsewhere, use that.
updatedAt: new Date(prismaSession.createdAt), // Fallback to createdAt
// Prisma.Session does not have a `userId` field.
userId: null, // Explicitly set to null or map if available from another source
// Ensure nullable fields from Prisma are correctly mapped to ChatSession's optional or nullable fields
category: prismaSession.category ?? null,
language: prismaSession.language ?? null,
country: prismaSession.country ?? null,
ipAddress: prismaSession.ipAddress ?? null,
sentiment: prismaSession.sentiment ?? null,
messagesSent: prismaSession.messagesSent ?? undefined, // Use undefined if ChatSession expects number | undefined
avgResponseTime: prismaSession.avgResponseTime ?? null,
escalated: prismaSession.escalated ?? undefined,
forwardedHr: prismaSession.forwardedHr ?? undefined,
initialMsg: prismaSession.initialMsg ?? undefined,
fullTranscriptUrl: prismaSession.fullTranscriptUrl ?? null,
summary: prismaSession.summary ?? null, // New field
transcriptContent: null, // Not available in Session model
messages:
prismaSession.messages?.map((msg) => ({
id: msg.id,
sessionId: msg.sessionId,
timestamp: msg.timestamp ? new Date(msg.timestamp) : new Date(),
role: msg.role,
content: msg.content,
order: msg.order,
createdAt: new Date(msg.createdAt),
})) ?? [], // New field - parsed messages
};
return NextResponse.json({ session });
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "An unknown error occurred";
return NextResponse.json(
{ error: "Failed to fetch session", details: errorMessage },
{ status: 500 }
);
}
}

View File

@@ -1,149 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth/next";
import { authOptions } from "../../auth/[...nextauth]/route";
import { prisma } from "../../../../lib/prisma";
import {
ChatSession,
SessionApiResponse,
SessionQuery,
} from "../../../../lib/types";
import { Prisma } from "@prisma/client";
export async function GET(request: NextRequest) {
const authSession = await getServerSession(authOptions);
if (!authSession || !authSession.user?.companyId) {
return NextResponse.json({ error: "Unauthorized" }, { status: 401 });
}
const companyId = authSession.user.companyId;
const { searchParams } = new URL(request.url);
const searchTerm = searchParams.get("searchTerm");
const category = searchParams.get("category");
const language = searchParams.get("language");
const startDate = searchParams.get("startDate");
const endDate = searchParams.get("endDate");
const sortKey = searchParams.get("sortKey");
const sortOrder = searchParams.get("sortOrder");
const queryPage = searchParams.get("page");
const queryPageSize = searchParams.get("pageSize");
const page = Number(queryPage) || 1;
const pageSize = Number(queryPageSize) || 10;
try {
const whereClause: Prisma.SessionWhereInput = { companyId };
// Search Term
if (searchTerm && searchTerm.trim() !== "") {
const searchConditions = [
{ id: { contains: searchTerm } },
{ initialMsg: { contains: searchTerm } },
{ summary: { contains: searchTerm } },
];
whereClause.OR = searchConditions;
}
// Category Filter
if (category && category.trim() !== "") {
// Cast to SessionCategory enum if it's a valid value
whereClause.category = category as any;
}
// Language Filter
if (language && language.trim() !== "") {
whereClause.language = language;
}
// Date Range Filter
if (startDate) {
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
gte: new Date(startDate),
};
}
if (endDate) {
const inclusiveEndDate = new Date(endDate);
inclusiveEndDate.setDate(inclusiveEndDate.getDate() + 1);
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
lt: inclusiveEndDate,
};
}
// Sorting
const validSortKeys: { [key: string]: string } = {
startTime: "startTime",
category: "category",
language: "language",
sentiment: "sentiment",
messagesSent: "messagesSent",
avgResponseTime: "avgResponseTime",
};
let orderByCondition:
| Prisma.SessionOrderByWithRelationInput
| Prisma.SessionOrderByWithRelationInput[];
const primarySortField =
sortKey && validSortKeys[sortKey]
? validSortKeys[sortKey]
: "startTime"; // Default to startTime field if sortKey is invalid/missing
const primarySortOrder =
sortOrder === "asc" || sortOrder === "desc" ? sortOrder : "desc"; // Default to desc order
if (primarySortField === "startTime") {
// If sorting by startTime, it's the only sort criteria
orderByCondition = { [primarySortField]: primarySortOrder };
} else {
// If sorting by another field, use startTime: "desc" as secondary sort
orderByCondition = [
{ [primarySortField]: primarySortOrder },
{ startTime: "desc" },
];
}
const prismaSessions = await prisma.session.findMany({
where: whereClause,
orderBy: orderByCondition,
skip: (page - 1) * pageSize,
take: pageSize,
});
const totalSessions = await prisma.session.count({ where: whereClause });
const sessions: ChatSession[] = prismaSessions.map((ps) => ({
id: ps.id,
sessionId: ps.id,
companyId: ps.companyId,
startTime: new Date(ps.startTime),
endTime: ps.endTime ? new Date(ps.endTime) : null,
createdAt: new Date(ps.createdAt),
updatedAt: new Date(ps.createdAt),
userId: null,
category: ps.category ?? null,
language: ps.language ?? null,
country: ps.country ?? null,
ipAddress: ps.ipAddress ?? null,
sentiment: ps.sentiment ?? null,
messagesSent: ps.messagesSent ?? undefined,
avgResponseTime: ps.avgResponseTime ?? null,
escalated: ps.escalated ?? undefined,
forwardedHr: ps.forwardedHr ?? undefined,
initialMsg: ps.initialMsg ?? undefined,
fullTranscriptUrl: ps.fullTranscriptUrl ?? null,
transcriptContent: null, // Transcript content is now fetched from fullTranscriptUrl when needed
}));
return NextResponse.json({ sessions, totalSessions });
} catch (error) {
const errorMessage =
error instanceof Error ? error.message : "An unknown error occurred";
return NextResponse.json(
{ error: "Failed to fetch sessions", details: errorMessage },
{ status: 500 }
);
}
}

View File

@@ -1,36 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { getServerSession } from "next-auth";
import { prisma } from "../../../../lib/prisma";
import { authOptions } from "../../auth/[...nextauth]/route";
export async function POST(request: NextRequest) {
const session = await getServerSession(authOptions);
if (!session?.user || session.user.role !== "ADMIN") {
return NextResponse.json({ error: "Forbidden" }, { status: 403 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
const body = await request.json();
const { csvUrl, csvUsername, csvPassword, sentimentThreshold } = body;
await prisma.company.update({
where: { id: user.companyId },
data: {
csvUrl,
csvUsername,
...(csvPassword ? { csvPassword } : {}),
sentimentAlert: sentimentThreshold
? parseFloat(sentimentThreshold)
: null,
},
});
return NextResponse.json({ ok: true });
}

View File

@@ -1,80 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import crypto from "crypto";
import { getServerSession } from "next-auth";
import { prisma } from "../../../../lib/prisma";
import bcrypt from "bcryptjs";
import { authOptions } from "../../auth/[...nextauth]/route";
interface UserBasicInfo {
id: string;
email: string;
role: string;
}
export async function GET(request: NextRequest) {
const session = await getServerSession(authOptions);
if (!session?.user || session.user.role !== "ADMIN") {
return NextResponse.json({ error: "Forbidden" }, { status: 403 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
const users = await prisma.user.findMany({
where: { companyId: user.companyId },
});
const mappedUsers: UserBasicInfo[] = users.map((u) => ({
id: u.id,
email: u.email,
role: u.role,
}));
return NextResponse.json({ users: mappedUsers });
}
export async function POST(request: NextRequest) {
const session = await getServerSession(authOptions);
if (!session?.user || session.user.role !== "ADMIN") {
return NextResponse.json({ error: "Forbidden" }, { status: 403 });
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
});
if (!user) {
return NextResponse.json({ error: "No user" }, { status: 401 });
}
const body = await request.json();
const { email, role } = body;
if (!email || !role) {
return NextResponse.json({ error: "Missing fields" }, { status: 400 });
}
const exists = await prisma.user.findUnique({ where: { email } });
if (exists) {
return NextResponse.json({ error: "Email exists" }, { status: 409 });
}
const tempPassword = crypto.randomBytes(12).toString("base64").slice(0, 12); // secure random initial password
await prisma.user.create({
data: {
email,
password: await bcrypt.hash(tempPassword, 10),
companyId: user.companyId,
role,
},
});
// TODO: Email user their temp password (stub, for demo) - Implement a robust and secure email sending mechanism. Consider using a transactional email service.
return NextResponse.json({ ok: true, tempPassword });
}

View File

@@ -1,28 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { prisma } from "../../../lib/prisma";
import { sendEmail } from "../../../lib/sendEmail";
import crypto from "crypto";
export async function POST(request: NextRequest) {
const body = await request.json();
const { email } = body as { email: string };
const user = await prisma.user.findUnique({ where: { email } });
if (!user) {
// Always return 200 for privacy (don't reveal if email exists)
return NextResponse.json({ success: true }, { status: 200 });
}
const token = crypto.randomBytes(32).toString("hex");
const expiry = new Date(Date.now() + 1000 * 60 * 30); // 30 min expiry
await prisma.user.update({
where: { email },
data: { resetToken: token, resetTokenExpiry: expiry },
});
const resetUrl = `${process.env.NEXTAUTH_URL || "http://localhost:3000"}/reset-password?token=${token}`;
await sendEmail(email, "Password Reset", `Reset your password: ${resetUrl}`);
return NextResponse.json({ success: true }, { status: 200 });
}

View File

@@ -1,63 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { prisma } from "../../../lib/prisma";
import bcrypt from "bcryptjs";
interface RegisterRequestBody {
email: string;
password: string;
company: string;
csvUrl?: string;
}
export async function POST(request: NextRequest) {
const body = await request.json();
const { email, password, company, csvUrl } = body as RegisterRequestBody;
if (!email || !password || !company) {
return NextResponse.json(
{
success: false,
error: "Missing required fields",
},
{ status: 400 }
);
}
// Check if email exists
const exists = await prisma.user.findUnique({
where: { email },
});
if (exists) {
return NextResponse.json(
{
success: false,
error: "Email already exists",
},
{ status: 409 }
);
}
const newCompany = await prisma.company.create({
data: { name: company, csvUrl: csvUrl || "" },
});
const hashed = await bcrypt.hash(password, 10);
await prisma.user.create({
data: {
email,
password: hashed,
companyId: newCompany.id,
role: "ADMIN",
},
});
return NextResponse.json(
{
success: true,
data: { success: true },
},
{ status: 201 }
);
}

View File

@@ -1,63 +0,0 @@
import { NextRequest, NextResponse } from "next/server";
import { prisma } from "../../../lib/prisma";
import bcrypt from "bcryptjs";
export async function POST(request: NextRequest) {
const body = await request.json();
const { token, password } = body as { token?: string; password?: string };
if (!token || !password) {
return NextResponse.json(
{ error: "Token and password are required." },
{ status: 400 }
);
}
if (password.length < 8) {
return NextResponse.json(
{ error: "Password must be at least 8 characters long." },
{ status: 400 }
);
}
try {
const user = await prisma.user.findFirst({
where: {
resetToken: token,
resetTokenExpiry: { gte: new Date() },
},
});
if (!user) {
return NextResponse.json(
{
error: "Invalid or expired token. Please request a new password reset.",
},
{ status: 400 }
);
}
const hash = await bcrypt.hash(password, 10);
await prisma.user.update({
where: { id: user.id },
data: {
password: hash,
resetToken: null,
resetTokenExpiry: null,
},
});
return NextResponse.json(
{ message: "Password has been reset successfully." },
{ status: 200 }
);
} catch (error) {
console.error("Reset password error:", error);
return NextResponse.json(
{
error: "An internal server error occurred. Please try again later.",
},
{ status: 500 }
);
}
}

View File

@@ -77,8 +77,8 @@ export default function CompanySettingsPage() {
     return <div className="text-center py-10">Loading settings...</div>;
   }
-  // Check for ADMIN access
-  if (session?.user?.role !== "ADMIN") {
+  // Check for admin access
+  if (session?.user?.role !== "admin") {
     return (
       <div className="text-center py-10 bg-white rounded-xl shadow p-6">
         <h2 className="font-bold text-xl text-red-600 mb-2">Access Denied</h2>

View File

@@ -1,117 +1,101 @@
"use client"; "use client";
import { useEffect, useState, useCallback, useRef } from "react"; import { useEffect, useState, useCallback } from "react";
import { signOut, useSession } from "next-auth/react"; import { signOut, useSession } from "next-auth/react";
import { useRouter } from "next/navigation"; import { useRouter } from "next/navigation";
import {
SessionsLineChart,
CategoriesBarChart,
LanguagePieChart,
TokenUsageChart,
} from "../../../components/Charts";
import { Company, MetricsResult, WordCloudWord } from "../../../lib/types"; import { Company, MetricsResult, WordCloudWord } from "../../../lib/types";
import MetricCard from "../../../components/ui/metric-card"; import MetricCard from "../../../components/MetricCard";
import ModernLineChart from "../../../components/charts/line-chart"; import DonutChart from "../../../components/DonutChart";
import ModernBarChart from "../../../components/charts/bar-chart";
import ModernDonutChart from "../../../components/charts/donut-chart";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import { Skeleton } from "@/components/ui/skeleton";
import { Separator } from "@/components/ui/separator";
import {
DropdownMenu,
DropdownMenuContent,
DropdownMenuItem,
DropdownMenuTrigger,
} from "@/components/ui/dropdown-menu";
import {
MessageSquare,
Users,
Clock,
Zap,
Euro,
TrendingUp,
CheckCircle,
RefreshCw,
LogOut,
Calendar,
MoreVertical,
Globe,
MessageCircle,
} from "lucide-react";
import WordCloud from "../../../components/WordCloud"; import WordCloud from "../../../components/WordCloud";
import GeographicMap from "../../../components/GeographicMap"; import GeographicMap from "../../../components/GeographicMap";
import ResponseTimeDistribution from "../../../components/ResponseTimeDistribution"; import ResponseTimeDistribution from "../../../components/ResponseTimeDistribution";
import WelcomeBanner from "../../../components/WelcomeBanner";
import DateRangePicker from "../../../components/DateRangePicker"; import DateRangePicker from "../../../components/DateRangePicker";
import TopQuestionsChart from "../../../components/TopQuestionsChart"; import TopQuestionsChart from "../../../components/TopQuestionsChart";
// Safely wrapped component with useSession // Safely wrapped component with useSession
function DashboardContent() { function DashboardContent() {
const { data: session, status } = useSession(); const { data: session, status } = useSession(); // Add status from useSession
const router = useRouter(); const router = useRouter(); // Initialize useRouter
const [metrics, setMetrics] = useState<MetricsResult | null>(null); const [metrics, setMetrics] = useState<MetricsResult | null>(null);
const [company, setCompany] = useState<Company | null>(null); const [company, setCompany] = useState<Company | null>(null);
const [loading, setLoading] = useState<boolean>(false); const [, setLoading] = useState<boolean>(false);
const [refreshing, setRefreshing] = useState<boolean>(false); const [refreshing, setRefreshing] = useState<boolean>(false);
const [dateRange, setDateRange] = useState<{ minDate: string; maxDate: string } | null>(null); const [dateRange, setDateRange] = useState<{
minDate: string;
maxDate: string;
} | null>(null);
const [selectedStartDate, setSelectedStartDate] = useState<string>(""); const [selectedStartDate, setSelectedStartDate] = useState<string>("");
const [selectedEndDate, setSelectedEndDate] = useState<string>(""); const [selectedEndDate, setSelectedEndDate] = useState<string>("");
const [isInitialLoad, setIsInitialLoad] = useState<boolean>(true);
const isAuditor = session?.user?.role === "AUDITOR"; const isAuditor = session?.user?.role === "auditor";
// Function to fetch metrics with optional date range // Function to fetch metrics with optional date range
const fetchMetrics = async (startDate?: string, endDate?: string, isInitial = false) => { const fetchMetrics = useCallback(
setLoading(true); async (startDate?: string, endDate?: string) => {
try { setLoading(true);
let url = "/api/dashboard/metrics"; try {
if (startDate && endDate) { let url = "/api/dashboard/metrics";
url += `?startDate=${startDate}&endDate=${endDate}`; if (startDate && endDate) {
url += `?startDate=${startDate}&endDate=${endDate}`;
}
const res = await fetch(url);
const data = await res.json();
setMetrics(data.metrics);
setCompany(data.company);
// Set date range from API response (only on initial load)
if (data.dateRange && !dateRange) {
setDateRange(data.dateRange);
setSelectedStartDate(data.dateRange.minDate);
setSelectedEndDate(data.dateRange.maxDate);
}
} catch (error) {
console.error("Error fetching metrics:", error);
} finally {
setLoading(false);
} }
},
[dateRange]
);
const res = await fetch(url); // Handle date range changes
const data = await res.json(); const handleDateRangeChange = useCallback(
(startDate: string, endDate: string) => {
setMetrics(data.metrics);
setCompany(data.company);
// Set date range from API response (only on initial load)
if (data.dateRange && isInitial) {
setDateRange(data.dateRange);
setSelectedStartDate(data.dateRange.minDate);
setSelectedEndDate(data.dateRange.maxDate);
setIsInitialLoad(false);
}
} catch (error) {
console.error("Error fetching metrics:", error);
} finally {
setLoading(false);
}
};
// Handle date range changes with proper memoization
const handleDateRangeChange = useCallback((startDate: string, endDate: string) => {
// Only update if dates actually changed to prevent unnecessary API calls
if (startDate !== selectedStartDate || endDate !== selectedEndDate) {
setSelectedStartDate(startDate); setSelectedStartDate(startDate);
setSelectedEndDate(endDate); setSelectedEndDate(endDate);
fetchMetrics(startDate, endDate); fetchMetrics(startDate, endDate);
} },
}, [selectedStartDate, selectedEndDate]); [fetchMetrics]
);
useEffect(() => { useEffect(() => {
// Redirect if not authenticated // Redirect if not authenticated
if (status === "unauthenticated") { if (status === "unauthenticated") {
router.push("/login"); router.push("/login");
return; return; // Stop further execution in this effect
} }
// Fetch metrics and company on mount if authenticated // Fetch metrics and company on mount if authenticated
if (status === "authenticated" && isInitialLoad) { if (status === "authenticated") {
fetchMetrics(undefined, undefined, true); fetchMetrics();
} }
}, [status, router, isInitialLoad]); }, [status, router, fetchMetrics]); // Add fetchMetrics to dependency array
async function handleRefresh() { async function handleRefresh() {
if (isAuditor) return; if (isAuditor) return; // Prevent auditors from refreshing
try { try {
setRefreshing(true); setRefreshing(true);
// Make sure we have a company ID to send
if (!company?.id) { if (!company?.id) {
setRefreshing(false); setRefreshing(false);
alert("Cannot refresh: Company ID is missing"); alert("Cannot refresh: Company ID is missing");
@@ -125,6 +109,7 @@ function DashboardContent() {
}); });
if (res.ok) { if (res.ok) {
// Refetch metrics
const metricsRes = await fetch("/api/dashboard/metrics"); const metricsRes = await fetch("/api/dashboard/metrics");
const data = await metricsRes.json(); const data = await metricsRes.json();
setMetrics(data.metrics); setMetrics(data.metrics);
@@ -137,129 +122,70 @@ } }
} }
} }
// Calculate sentiment distribution
const getSentimentData = () => {
if (!metrics) return { positive: 0, neutral: 0, negative: 0 };
if (
metrics.sentimentPositiveCount !== undefined &&
metrics.sentimentNeutralCount !== undefined &&
metrics.sentimentNegativeCount !== undefined
) {
return {
positive: metrics.sentimentPositiveCount,
neutral: metrics.sentimentNeutralCount,
negative: metrics.sentimentNegativeCount,
};
}
const total = metrics.totalSessions || 1;
return {
positive: Math.round(total * 0.6),
neutral: Math.round(total * 0.3),
negative: Math.round(total * 0.1),
};
};
// Prepare token usage data
const getTokenData = () => {
if (!metrics || !metrics.tokensByDay) {
return { labels: [], values: [], costs: [] };
}
const days = Object.keys(metrics.tokensByDay).sort();
const labels = days.slice(-7);
const values = labels.map((day) => metrics.tokensByDay?.[day] || 0);
const costs = labels.map((day) => metrics.tokensCostByDay?.[day] || 0);
return { labels, values, costs };
};
// Show loading state while session status is being determined // Show loading state while session status is being determined
if (status === "loading") { if (status === "loading") {
return ( return <div className="text-center py-10">Loading session...</div>;
<div className="flex items-center justify-center min-h-[60vh]">
<div className="text-center space-y-4">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-primary mx-auto"></div>
<p className="text-muted-foreground">Loading session...</p>
</div>
</div>
);
} }
// If unauthenticated and not redirected yet (should be handled by useEffect, but as a fallback)
if (status === "unauthenticated") { if (status === "unauthenticated") {
return ( return <div className="text-center py-10">Redirecting to login...</div>;
<div className="flex items-center justify-center min-h-[60vh]">
<div className="text-center">
<p className="text-muted-foreground">Redirecting to login...</p>
</div>
</div>
);
} }
if (loading || !metrics || !company) { if (!metrics || !company) {
return ( return <div className="text-center py-10">Loading dashboard...</div>;
<div className="space-y-8">
{/* Header Skeleton */}
<Card>
<CardHeader>
<div className="flex justify-between items-start">
<div className="space-y-2">
<Skeleton className="h-8 w-48" />
<Skeleton className="h-4 w-64" />
</div>
<div className="flex gap-2">
<Skeleton className="h-10 w-24" />
<Skeleton className="h-10 w-20" />
</div>
</div>
</CardHeader>
</Card>
{/* Metrics Grid Skeleton */}
<div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-6">
{Array.from({ length: 8 }).map((_, i) => (
<MetricCard key={i} title="" value="" isLoading />
))}
</div>
{/* Charts Skeleton */}
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6">
<Card className="lg:col-span-2">
<CardHeader>
<Skeleton className="h-6 w-32" />
</CardHeader>
<CardContent>
<Skeleton className="h-64 w-full" />
</CardContent>
</Card>
<Card>
<CardHeader>
<Skeleton className="h-6 w-32" />
</CardHeader>
<CardContent>
<Skeleton className="h-64 w-full" />
</CardContent>
</Card>
</div>
</div>
);
} }
// Data preparation functions // Function to prepare word cloud data from metrics.wordCloudData
const getSentimentData = () => {
if (!metrics) return [];
const sentimentData = {
positive: metrics.sentimentPositiveCount ?? 0,
neutral: metrics.sentimentNeutralCount ?? 0,
negative: metrics.sentimentNegativeCount ?? 0,
};
return [
{ name: "Positive", value: sentimentData.positive, color: "rgb(34, 197, 94)" },
{ name: "Neutral", value: sentimentData.neutral, color: "rgb(168, 162, 158)" },
{ name: "Negative", value: sentimentData.negative, color: "rgb(239, 68, 68)" },
];
};
const getSessionsOverTimeData = () => {
if (!metrics?.days) return [];
return Object.entries(metrics.days).map(([date, value]) => ({
date: new Date(date).toLocaleDateString('en-US', { month: 'short', day: 'numeric' }),
value: value as number,
}));
};
const getCategoriesData = () => {
if (!metrics?.categories) return [];
return Object.entries(metrics.categories).map(([name, value]) => ({
name: name.length > 15 ? name.substring(0, 15) + '...' : name,
value: value as number,
}));
};
const getLanguagesData = () => {
if (!metrics?.languages) return [];
return Object.entries(metrics.languages).map(([name, value]) => ({
name,
value: value as number,
}));
};
const getWordCloudData = (): WordCloudWord[] => { const getWordCloudData = (): WordCloudWord[] => {
if (!metrics?.wordCloudData) return []; if (!metrics || !metrics.wordCloudData) return [];
return metrics.wordCloudData; return metrics.wordCloudData;
}; };
// Function to prepare country data for the map using actual metrics
const getCountryData = () => { const getCountryData = () => {
if (!metrics?.countries) return {}; if (!metrics || !metrics.countries) return {};
return Object.entries(metrics.countries).reduce(
// Convert the countries object from metrics to the format expected by GeographicMap
const result = Object.entries(metrics.countries).reduce(
(acc, [code, count]) => { (acc, [code, count]) => {
if (code && count) { if (code && count) {
acc[code] = count; acc[code] = count;
@ -268,8 +194,11 @@ function DashboardContent() {
}, },
{} as Record<string, number> {} as Record<string, number>
); );
return result;
}; };
// Function to prepare response time distribution data
const getResponseTimeData = () => { const getResponseTimeData = () => {
const avgTime = metrics.avgResponseTime || 1.5; const avgTime = metrics.avgResponseTime || 1.5;
const simulatedData: number[] = []; const simulatedData: number[] = [];
@ -284,318 +213,331 @@ function DashboardContent() {
return ( return (
<div className="space-y-8"> <div className="space-y-8">
{/* Apple-Style Unified Header */} <WelcomeBanner companyName={company.name} />
<Card className="border-0 bg-white shadow-sm"> <div className="flex flex-col sm:flex-row justify-between items-start sm:items-center bg-white p-6 rounded-2xl shadow-lg ring-1 ring-slate-200/50">
<CardHeader className="pb-6"> <div>
<div className="flex flex-col space-y-6"> <h1 className="text-3xl font-bold text-slate-800">{company.name}</h1>
{/* Top row: Company info and actions */} <p className="text-slate-500 mt-1">
<div className="flex flex-col sm:flex-row justify-between items-start sm:items-center gap-4"> Dashboard updated{" "}
<div className="space-y-1"> <span className="font-medium text-slate-600">
<div className="flex items-center gap-3"> {new Date(metrics.lastUpdated || Date.now()).toLocaleString()}
<h1 className="text-2xl font-semibold text-gray-900 tracking-tight">{company.name}</h1> </span>
<Badge variant="secondary" className="text-xs font-medium bg-gray-100 text-gray-700 border-0"> </p>
Analytics Dashboard </div>
</Badge> <div className="flex items-center gap-3 mt-4 sm:mt-0">
</div> <button
<p className="text-sm text-gray-500"> className="bg-sky-600 text-white py-2 px-5 rounded-lg shadow hover:bg-sky-700 transition-colors disabled:opacity-60 disabled:cursor-not-allowed flex items-center text-sm font-medium"
Last updated{" "} onClick={handleRefresh}
<span className="font-medium text-gray-700"> disabled={refreshing || isAuditor}
{new Date(metrics.lastUpdated || Date.now()).toLocaleString()} >
</span> {refreshing ? (
</p> <>
</div> <svg
className="animate-spin -ml-1 mr-2 h-4 w-4 text-white"
<div className="flex items-center gap-3"> xmlns="http://www.w3.org/2000/svg"
<Button fill="none"
onClick={handleRefresh} viewBox="0 0 24 24"
disabled={refreshing || isAuditor}
size="sm"
className="gap-2 bg-blue-600 hover:bg-blue-700 border-0 shadow-sm"
> >
<RefreshCw className={`h-4 w-4 ${refreshing ? 'animate-spin' : ''}`} /> <circle
{refreshing ? "Refreshing..." : "Refresh"} className="opacity-25"
</Button> cx="12"
cy="12"
<DropdownMenu> r="10"
<DropdownMenuTrigger asChild> stroke="currentColor"
<Button variant="outline" size="sm" className="border-gray-200 hover:bg-gray-50"> strokeWidth="4"
<MoreVertical className="h-4 w-4" /> ></circle>
</Button> <path
</DropdownMenuTrigger> className="opacity-75"
<DropdownMenuContent align="end" className="border-gray-200 shadow-lg"> fill="currentColor"
<DropdownMenuItem onClick={() => signOut({ callbackUrl: "/login" })}> d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"
<LogOut className="h-4 w-4 mr-2" /> ></path>
Sign out </svg>
</DropdownMenuItem> Refreshing...
</DropdownMenuContent> </>
</DropdownMenu> ) : (
</div> "Refresh Data"
</div>
{/* Date Range Controls */}
{dateRange && (
<div className="border-t border-gray-100 pt-6">
<div className="flex flex-col sm:flex-row items-start sm:items-center gap-4">
<div className="flex items-center gap-2">
<Calendar className="h-4 w-4 text-gray-500" />
<span className="text-sm font-medium text-gray-700">Date Range:</span>
</div>
<div className="flex items-center gap-3">
<div className="flex items-center gap-2">
<label className="text-sm text-gray-600">From:</label>
<input
type="date"
value={selectedStartDate}
min={dateRange.minDate}
max={dateRange.maxDate}
onChange={(e) => handleDateRangeChange(e.target.value, selectedEndDate)}
className="px-3 py-1.5 text-sm border border-gray-200 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent"
/>
</div>
<div className="flex items-center gap-2">
<label className="text-sm text-gray-600">To:</label>
<input
type="date"
value={selectedEndDate}
min={dateRange.minDate}
max={dateRange.maxDate}
onChange={(e) => handleDateRangeChange(selectedStartDate, e.target.value)}
className="px-3 py-1.5 text-sm border border-gray-200 rounded-lg focus:outline-none focus:ring-2 focus:ring-blue-500 focus:border-transparent"
/>
</div>
<div className="flex gap-2">
<Button
variant="outline"
size="sm"
onClick={() => {
const endDate = new Date().toISOString().split('T')[0];
const startDate = new Date(Date.now() - 7 * 24 * 60 * 60 * 1000).toISOString().split('T')[0];
handleDateRangeChange(startDate, endDate);
}}
className="text-xs border-gray-200 hover:bg-gray-50"
>
Last 7 days
</Button>
<Button
variant="outline"
size="sm"
onClick={() => {
const endDate = new Date().toISOString().split('T')[0];
const startDate = new Date(Date.now() - 30 * 24 * 60 * 60 * 1000).toISOString().split('T')[0];
handleDateRangeChange(startDate, endDate);
}}
className="text-xs border-gray-200 hover:bg-gray-50"
>
Last 30 days
</Button>
<Button
variant="outline"
size="sm"
onClick={() => handleDateRangeChange(dateRange.minDate, dateRange.maxDate)}
className="text-xs border-gray-200 hover:bg-gray-50"
>
All time
</Button>
</div>
</div>
</div>
<p className="text-xs text-gray-500 mt-2">
Available data: {new Date(dateRange.minDate).toLocaleDateString()} - {new Date(dateRange.maxDate).toLocaleDateString()}
</p>
</div>
)} )}
</div> </button>
</CardHeader> <button
</Card> className="bg-slate-100 text-slate-700 py-2 px-5 rounded-lg shadow hover:bg-slate-200 transition-colors flex items-center text-sm font-medium"
onClick={() => signOut({ callbackUrl: "/login" })}
>
Sign out
</button>
</div>
</div>
{/* Modern Metrics Grid */} {/* Date Range Picker */}
<div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 gap-6"> {dateRange && (
<DateRangePicker
minDate={dateRange.minDate}
maxDate={dateRange.maxDate}
onDateRangeChange={handleDateRangeChange}
initialStartDate={selectedStartDate}
initialEndDate={selectedEndDate}
/>
)}
<div className="grid grid-cols-1 sm:grid-cols-2 lg:grid-cols-4 xl:grid-cols-7 gap-4">
<MetricCard <MetricCard
title="Total Sessions" title="Total Sessions"
value={metrics.totalSessions?.toLocaleString()} value={metrics.totalSessions}
icon={<MessageSquare className="h-5 w-5" />} icon={
<svg
className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M7 20l4-16m2 16l4-16M6 9h14M4 15h14"
/>
</svg>
}
trend={{ trend={{
value: metrics.sessionTrend ?? 0, value: metrics.sessionTrend ?? 0,
isPositive: (metrics.sessionTrend ?? 0) >= 0, isPositive: (metrics.sessionTrend ?? 0) >= 0,
}} }}
variant="primary"
/> />
<MetricCard <MetricCard
title="Unique Users" title="Unique Users"
value={metrics.uniqueUsers?.toLocaleString()} value={metrics.uniqueUsers}
icon={<Users className="h-5 w-5" />} icon={
<svg
className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M16 7a4 4 0 11-8 0 4 4 0 018 0zM12 14a7 7 0 00-7 7h14a7 7 0 00-7-7z"
/>
</svg>
}
trend={{ trend={{
value: metrics.usersTrend ?? 0, value: metrics.usersTrend ?? 0,
isPositive: (metrics.usersTrend ?? 0) >= 0, isPositive: (metrics.usersTrend ?? 0) >= 0,
}} }}
variant="success"
/> />
<MetricCard <MetricCard
title="Avg. Session Time" title="Avg. Session Time"
value={`${Math.round(metrics.avgSessionLength || 0)}s`} value={`${Math.round(metrics.avgSessionLength || 0)}s`}
icon={<Clock className="h-5 w-5" />} icon={
<svg
className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M12 8v4l3 3m6-3a9 9 0 11-18 0 9 9 0 0118 0z"
/>
</svg>
}
trend={{ trend={{
value: metrics.avgSessionTimeTrend ?? 0, value: metrics.avgSessionTimeTrend ?? 0,
isPositive: (metrics.avgSessionTimeTrend ?? 0) >= 0, isPositive: (metrics.avgSessionTimeTrend ?? 0) >= 0,
}} }}
variant="primary"
/> />
<MetricCard <MetricCard
title="Avg. Response Time" title="Avg. Response Time"
value={`${metrics.avgResponseTime?.toFixed(1) || 0}s`} value={`${metrics.avgResponseTime?.toFixed(1) || 0}s`}
icon={<Zap className="h-5 w-5" />} icon={
<svg
className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M13 10V3L4 14h7v7l9-11h-7z"
/>
</svg>
}
trend={{ trend={{
value: metrics.avgResponseTimeTrend ?? 0, value: metrics.avgResponseTimeTrend ?? 0,
isPositive: (metrics.avgResponseTimeTrend ?? 0) <= 0, isPositive: (metrics.avgResponseTimeTrend ?? 0) <= 0, // Lower response time is better
}} }}
variant="warning"
/> />
<MetricCard <MetricCard
title="Daily Costs" title="Avg. Daily Costs"
value={`${metrics.avgDailyCosts?.toFixed(4) || '0.0000'}`} value={`${metrics.avgDailyCosts?.toFixed(4) || "0.0000"}`}
icon={<Euro className="h-5 w-5" />} icon={
description="Average per day" <svg
variant="warning" className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M12 8c-1.657 0-3 .895-3 2s1.343 2 3 2 3 .895 3 2-1.343 2-3 2m0-8c1.11 0 2.08.402 2.599 1M12 8V7m0 1v8m0 0v1m0-1c-1.11 0-2.08-.402-2.599-1M21 12a9 9 0 11-18 0 9 9 0 0118 0z"
/>
</svg>
}
/> />
<MetricCard <MetricCard
title="Peak Usage" title="Peak Usage Time"
value={metrics.peakUsageTime || 'N/A'} value={metrics.peakUsageTime || "N/A"}
icon={<TrendingUp className="h-5 w-5" />} icon={
description="Busiest hour" <svg
variant="primary" className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M9 19v-6a2 2 0 00-2-2H5a2 2 0 00-2 2v6a2 2 0 002 2h2a2 2 0 002-2zm0 0V9a2 2 0 012-2h2a2 2 0 012 2v10m-6 0a2 2 0 002 2h2a2 2 0 002-2m0 0V5a2 2 0 012-2h2a2 2 0 012 2v14a2 2 0 01-2 2h-2a2 2 0 01-2-2z"
/>
</svg>
}
/> />
<MetricCard <MetricCard
title="Resolution Rate" title="Resolved Chats"
value={`${metrics.resolvedChatsPercentage?.toFixed(1) || '0.0'}%`} value={`${metrics.resolvedChatsPercentage?.toFixed(1) || "0.0"}%`}
icon={<CheckCircle className="h-5 w-5" />} icon={
<svg
className="h-5 w-5"
xmlns="http://www.w3.org/2000/svg"
fill="none"
viewBox="0 0 24 24"
stroke="currentColor"
strokeWidth="1"
>
<path
strokeLinecap="round"
strokeLinejoin="round"
d="M9 12l2 2 4-4m6 2a9 9 0 11-18 0 9 9 0 0118 0z"
/>
</svg>
}
trend={{ trend={{
value: metrics.resolvedChatsPercentage ?? 0, value: metrics.resolvedChatsPercentage ?? 0,
isPositive: (metrics.resolvedChatsPercentage ?? 0) >= 80, isPositive: (metrics.resolvedChatsPercentage ?? 0) >= 80, // 80%+ resolution rate is good
}} }}
variant={metrics.resolvedChatsPercentage && metrics.resolvedChatsPercentage >= 80 ? "success" : "warning"}
/>
<MetricCard
title="Active Languages"
value={Object.keys(metrics.languages || {}).length}
icon={<Globe className="h-5 w-5" />}
description="Languages detected"
variant="success"
/> />
</div> </div>
{/* Charts Section */}
<div className="grid grid-cols-1 lg:grid-cols-3 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-3 gap-6">
<ModernLineChart <div className="bg-white p-6 rounded-xl shadow lg:col-span-2">
data={getSessionsOverTimeData()} <h3 className="font-bold text-lg text-gray-800 mb-4">
title="Sessions Over Time" Sessions Over Time
className="lg:col-span-2" </h3>
height={350} <SessionsLineChart sessionsPerDay={metrics.days} />
/> </div>
<div className="bg-white p-6 rounded-xl shadow">
<ModernDonutChart <h3 className="font-bold text-lg text-gray-800 mb-4">
data={getSentimentData()} Conversation Sentiment
title="Conversation Sentiment" </h3>
centerText={{ <DonutChart
title: "Total", data={{
value: metrics.totalSessions || 0, labels: ["Positive", "Neutral", "Negative"],
}} values: [
height={350} getSentimentData().positive,
/> getSentimentData().neutral,
getSentimentData().negative,
],
colors: ["#1cad7c", "#a1a1a1", "#dc2626"],
}}
centerText={{
title: "Total",
value: metrics.totalSessions,
}}
/>
</div>
</div> </div>
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
<ModernBarChart <div className="bg-white p-6 rounded-xl shadow">
data={getCategoriesData()} <h3 className="font-bold text-lg text-gray-800 mb-4">
title="Sessions by Category" Sessions by Category
height={350} </h3>
/> <CategoriesBarChart categories={metrics.categories || {}} />
</div>
<ModernDonutChart <div className="bg-white p-6 rounded-xl shadow">
data={getLanguagesData()} <h3 className="font-bold text-lg text-gray-800 mb-4">
title="Languages Used" Languages Used
height={350} </h3>
/> <LanguagePieChart languages={metrics.languages || {}} />
</div>
</div> </div>
{/* Geographic and Topics Section */}
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6"> <div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
<Card> <div className="bg-white p-6 rounded-xl shadow">
<CardHeader> <h3 className="font-bold text-lg text-gray-800 mb-4">
<CardTitle className="flex items-center gap-2"> Geographic Distribution
<Globe className="h-5 w-5" /> </h3>
Geographic Distribution <GeographicMap countries={getCountryData()} />
</CardTitle> </div>
</CardHeader>
<CardContent>
<GeographicMap countries={getCountryData()} />
</CardContent>
</Card>
<Card> <div className="bg-white p-6 rounded-xl shadow">
<CardHeader> <h3 className="font-bold text-lg text-gray-800 mb-4">
<CardTitle className="flex items-center gap-2"> Common Topics
<MessageCircle className="h-5 w-5" /> </h3>
Common Topics <div className="h-[300px]">
</CardTitle> <WordCloud words={getWordCloudData()} width={500} height={400} />
</CardHeader> </div>
<CardContent> </div>
<div className="h-[300px]">
<WordCloud words={getWordCloudData()} width={500} height={300} />
</div>
</CardContent>
</Card>
</div> </div>
{/* Top Questions Chart */} {/* Top Questions Chart */}
<TopQuestionsChart data={metrics.topQuestions || []} /> <TopQuestionsChart data={metrics.topQuestions || []} />
{/* Response Time Distribution */} <div className="bg-white p-6 rounded-xl shadow">
<Card> <h3 className="font-bold text-lg text-gray-800 mb-4">
<CardHeader> Response Time Distribution
<CardTitle>Response Time Distribution</CardTitle> </h3>
</CardHeader> <ResponseTimeDistribution
<CardContent> data={getResponseTimeData()}
<ResponseTimeDistribution average={metrics.avgResponseTime || 0}
data={getResponseTimeData()} />
average={metrics.avgResponseTime || 0} </div>
/> <div className="bg-white p-6 rounded-xl shadow">
</CardContent> <div className="flex flex-col sm:flex-row justify-between items-start sm:items-center gap-3 mb-4">
</Card> <h3 className="font-bold text-lg text-gray-800">
Token Usage & Costs
{/* Token Usage Summary */} </h3>
<Card> <div className="flex flex-col sm:flex-row gap-2 sm:gap-4 w-full sm:w-auto">
<CardHeader> <div className="text-sm bg-blue-50 text-blue-700 px-3 py-1 rounded-full flex items-center">
<div className="flex flex-col sm:flex-row justify-between items-start sm:items-center gap-4"> <span className="font-semibold mr-1">Total Tokens:</span>
<CardTitle>AI Usage & Costs</CardTitle> {metrics.totalTokens?.toLocaleString() || 0}
<div className="flex flex-wrap gap-2"> </div>
<Badge variant="outline" className="gap-1"> <div className="text-sm bg-green-50 text-green-700 px-3 py-1 rounded-full flex items-center">
<span className="font-semibold">Total Tokens:</span> <span className="font-semibold mr-1">Total Cost:</span>
{metrics.totalTokens?.toLocaleString() || 0} {metrics.totalTokensEur?.toFixed(4) || 0}
</Badge>
<Badge variant="outline" className="gap-1">
<span className="font-semibold">Total Cost:</span>
{metrics.totalTokensEur?.toFixed(4) || 0}
</Badge>
</div> </div>
</div> </div>
</CardHeader> </div>
<CardContent> <TokenUsageChart tokenData={getTokenData()} />
<div className="text-center py-8 text-muted-foreground"> </div>
<p>Token usage chart will be implemented with historical data</p>
</div>
</CardContent>
</Card>
</div> </div>
); );
} }
// Our exported component
export default function DashboardPage() { export default function DashboardPage() {
return <DashboardContent />; return <DashboardContent />;
} }
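For reference, the sentiment helper in the page above prefers explicit per-sentiment counts from the API and only falls back to a rough 60/30/10 split of total sessions when those counts are missing. A trimmed, framework-free sketch of that mapping — the Metrics interface here is a reduced stand-in, not the project's actual type:

// Hypothetical, trimmed-down metrics shape for illustration.
interface Metrics {
  totalSessions?: number;
  sentimentPositiveCount?: number;
  sentimentNeutralCount?: number;
  sentimentNegativeCount?: number;
}

function getSentimentCounts(metrics: Metrics | null) {
  if (!metrics) return { positive: 0, neutral: 0, negative: 0 };

  const { sentimentPositiveCount, sentimentNeutralCount, sentimentNegativeCount } = metrics;
  if (
    sentimentPositiveCount !== undefined &&
    sentimentNeutralCount !== undefined &&
    sentimentNegativeCount !== undefined
  ) {
    return {
      positive: sentimentPositiveCount,
      neutral: sentimentNeutralCount,
      negative: sentimentNegativeCount,
    };
  }

  // Fallback when the API has no per-sentiment counts yet:
  // assume a rough 60/30/10 split across all sessions.
  const total = metrics.totalSessions || 1;
  return {
    positive: Math.round(total * 0.6),
    neutral: Math.round(total * 0.3),
    negative: Math.round(total * 0.1),
  };
}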
View File
@ -4,19 +4,6 @@ import { useSession } from "next-auth/react";
import { useRouter } from "next/navigation"; import { useRouter } from "next/navigation";
import { useEffect, useState } from "react"; import { useEffect, useState } from "react";
import { FC } from "react"; import { FC } from "react";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
import { Button } from "@/components/ui/button";
import { Badge } from "@/components/ui/badge";
import {
BarChart3,
MessageSquare,
Settings,
Users,
ArrowRight,
TrendingUp,
Shield,
Zap,
} from "lucide-react";
const DashboardPage: FC = () => { const DashboardPage: FC = () => {
const { data: session, status } = useSession(); const { data: session, status } = useSession();
@ -34,223 +21,82 @@ const DashboardPage: FC = () => {
if (loading) { if (loading) {
return ( return (
<div className="flex items-center justify-center min-h-[60vh]"> <div className="flex items-center justify-center min-h-[40vh]">
<div className="text-center space-y-4"> <div className="text-center">
<div className="animate-spin rounded-full h-12 w-12 border-b-2 border-primary mx-auto"></div> <div className="animate-spin rounded-full h-12 w-12 border-t-2 border-b-2 border-sky-500 mx-auto mb-4"></div>
<p className="text-lg text-muted-foreground">Loading dashboard...</p> <p className="text-lg text-gray-600">Loading dashboard...</p>
</div> </div>
</div> </div>
); );
} }
const navigationCards = [
{
title: "Analytics Overview",
description: "View comprehensive metrics, charts, and insights from your chat sessions",
icon: <BarChart3 className="h-6 w-6" />,
href: "/dashboard/overview",
variant: "primary" as const,
features: ["Real-time metrics", "Interactive charts", "Trend analysis"],
},
{
title: "Session Browser",
description: "Browse, search, and analyze individual conversation sessions",
icon: <MessageSquare className="h-6 w-6" />,
href: "/dashboard/sessions",
variant: "success" as const,
features: ["Session search", "Conversation details", "Export data"],
},
...(session?.user?.role === "ADMIN"
? [
{
title: "Company Settings",
description: "Configure company settings, integrations, and API connections",
icon: <Settings className="h-6 w-6" />,
href: "/dashboard/company",
variant: "warning" as const,
features: ["API configuration", "Integration settings", "Data management"],
adminOnly: true,
},
{
title: "User Management",
description: "Invite team members and manage user accounts and permissions",
icon: <Users className="h-6 w-6" />,
href: "/dashboard/users",
variant: "default" as const,
features: ["User invitations", "Role management", "Access control"],
adminOnly: true,
},
]
: []),
];
const getCardClasses = (variant: string) => {
switch (variant) {
case "primary":
return "border-primary/20 bg-linear-to-br from-primary/5 to-primary/10 hover:from-primary/10 hover:to-primary/15";
case "success":
return "border-green-200 bg-linear-to-br from-green-50 to-green-100 hover:from-green-100 hover:to-green-150 dark:border-green-800 dark:from-green-950 dark:to-green-900";
case "warning":
return "border-amber-200 bg-linear-to-br from-amber-50 to-amber-100 hover:from-amber-100 hover:to-amber-150 dark:border-amber-800 dark:from-amber-950 dark:to-amber-900";
default:
return "border-border bg-linear-to-br from-card to-muted/20 hover:from-muted/30 hover:to-muted/40";
}
};
const getIconClasses = (variant: string) => {
switch (variant) {
case "primary":
return "bg-primary/10 text-primary border-primary/20";
case "success":
return "bg-green-100 text-green-600 border-green-200 dark:bg-green-900 dark:text-green-400 dark:border-green-800";
case "warning":
return "bg-amber-100 text-amber-600 border-amber-200 dark:bg-amber-900 dark:text-amber-400 dark:border-amber-800";
default:
return "bg-muted text-muted-foreground border-border";
}
};
return ( return (
<div className="space-y-8"> <div className="space-y-6">
{/* Welcome Header */} <div className="bg-white rounded-xl shadow p-6">
<Card className="border-0 bg-linear-to-r from-primary/5 via-primary/10 to-primary/5"> <h1 className="text-2xl font-bold mb-4">Dashboard</h1>
<CardHeader>
<div className="flex flex-col sm:flex-row justify-between items-start sm:items-center gap-4"> <div className="grid sm:grid-cols-2 lg:grid-cols-3 gap-6">
<div className="space-y-2"> <div className="bg-gradient-to-br from-sky-50 to-sky-100 p-6 rounded-xl shadow-sm hover:shadow-md transition-shadow">
<div className="flex items-center gap-3"> <h2 className="text-lg font-semibold text-sky-700">Analytics</h2>
<h1 className="text-3xl font-bold tracking-tight"> <p className="text-gray-600 mt-2 mb-4">
Welcome back, {session?.user?.name || "User"}! View your chat session metrics and analytics
</h1> </p>
<Badge variant="secondary" className="text-xs"> <button
{session?.user?.role} onClick={() => router.push("/dashboard/overview")}
</Badge> className="bg-sky-500 hover:bg-sky-600 text-white px-4 py-2 rounded-lg text-sm font-medium transition-colors"
</div> >
<p className="text-muted-foreground"> View Analytics
Choose a section below to explore your analytics dashboard </button>
</div>
<div className="bg-gradient-to-br from-emerald-50 to-emerald-100 p-6 rounded-xl shadow-sm hover:shadow-md transition-shadow">
<h2 className="text-lg font-semibold text-emerald-700">Sessions</h2>
<p className="text-gray-600 mt-2 mb-4">
Browse and analyze conversation sessions
</p>
<button
onClick={() => router.push("/dashboard/sessions")}
className="bg-emerald-500 hover:bg-emerald-600 text-white px-4 py-2 rounded-lg text-sm font-medium transition-colors"
>
View Sessions
</button>
</div>
{session?.user?.role === "admin" && (
<div className="bg-gradient-to-br from-purple-50 to-purple-100 p-6 rounded-xl shadow-sm hover:shadow-md transition-shadow">
<h2 className="text-lg font-semibold text-purple-700">
Company Settings
</h2>
<p className="text-gray-600 mt-2 mb-4">
Configure company settings and integrations
</p> </p>
</div> <button
onClick={() => router.push("/dashboard/company")}
<div className="flex items-center gap-2"> className="bg-purple-500 hover:bg-purple-600 text-white px-4 py-2 rounded-lg text-sm font-medium transition-colors"
<div className="flex items-center gap-1 text-sm text-muted-foreground">
<Shield className="h-4 w-4" />
Secure Dashboard
</div>
</div>
</div>
</CardHeader>
</Card>
{/* Navigation Cards */}
<div className="grid grid-cols-1 lg:grid-cols-2 gap-6">
{navigationCards.map((card, index) => (
<Card
key={index}
className={`relative overflow-hidden transition-all duration-200 hover:shadow-lg hover:-translate-y-0.5 cursor-pointer ${getCardClasses(
card.variant
)}`}
onClick={() => router.push(card.href)}
>
{/* Subtle gradient overlay */}
<div className="absolute inset-0 bg-linear-to-br from-white/50 to-transparent dark:from-white/5 pointer-events-none" />
<CardHeader className="relative">
<div className="flex items-start justify-between">
<div className="space-y-3">
<div className="flex items-center gap-3">
<div
className={`flex h-12 w-12 shrink-0 items-center justify-center rounded-full border transition-colors ${getIconClasses(
card.variant
)}`}
>
{card.icon}
</div>
<div>
<CardTitle className="text-xl font-semibold flex items-center gap-2">
{card.title}
{card.adminOnly && (
<Badge variant="outline" className="text-xs">
Admin
</Badge>
)}
</CardTitle>
</div>
</div>
<p className="text-muted-foreground leading-relaxed">
{card.description}
</p>
</div>
</div>
</CardHeader>
<CardContent className="relative space-y-4">
{/* Features List */}
<div className="space-y-2">
{card.features.map((feature, featureIndex) => (
<div key={featureIndex} className="flex items-center gap-2 text-sm">
<div className="h-1.5 w-1.5 rounded-full bg-current opacity-60" />
<span className="text-muted-foreground">{feature}</span>
</div>
))}
</div>
{/* Action Button */}
<Button
className="w-full gap-2 mt-4"
variant={card.variant === "primary" ? "default" : "outline"}
onClick={(e) => {
e.stopPropagation();
router.push(card.href);
}}
> >
<span> Manage Settings
{card.title === "Analytics Overview" && "View Analytics"} </button>
{card.title === "Session Browser" && "Browse Sessions"} </div>
{card.title === "Company Settings" && "Manage Settings"} )}
{card.title === "User Management" && "Manage Users"}
</span>
<ArrowRight className="h-4 w-4" />
</Button>
</CardContent>
</Card>
))}
</div>
{/* Quick Stats */} {session?.user?.role === "admin" && (
<Card> <div className="bg-gradient-to-br from-amber-50 to-amber-100 p-6 rounded-xl shadow-sm hover:shadow-md transition-shadow">
<CardHeader> <h2 className="text-lg font-semibold text-amber-700">
<CardTitle className="flex items-center gap-2"> User Management
<TrendingUp className="h-5 w-5" /> </h2>
Quick Stats <p className="text-gray-600 mt-2 mb-4">
</CardTitle> Invite and manage user accounts
</CardHeader> </p>
<CardContent> <button
<div className="grid grid-cols-1 sm:grid-cols-3 gap-6"> onClick={() => router.push("/dashboard/users")}
<div className="text-center space-y-2"> className="bg-amber-500 hover:bg-amber-600 text-white px-4 py-2 rounded-lg text-sm font-medium transition-colors"
<div className="flex items-center justify-center gap-2"> >
<Zap className="h-5 w-5 text-primary" /> Manage Users
<span className="text-2xl font-bold">Real-time</span> </button>
</div>
<p className="text-sm text-muted-foreground">Data updates</p>
</div> </div>
)}
<div className="text-center space-y-2"> </div>
<div className="flex items-center justify-center gap-2"> </div>
<Shield className="h-5 w-5 text-green-600" />
<span className="text-2xl font-bold">Secure</span>
</div>
<p className="text-sm text-muted-foreground">Data protection</p>
</div>
<div className="text-center space-y-2">
<div className="flex items-center justify-center gap-2">
<BarChart3 className="h-5 w-5 text-blue-600" />
<span className="text-2xl font-bold">Advanced</span>
</div>
<p className="text-sm text-muted-foreground">Analytics</p>
</div>
</div>
</CardContent>
</Card>
</div> </div>
); );
}; };
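The landing page above builds its navigation as data: a card list where admin-only entries are appended with a conditional spread on the session role. A minimal sketch of that shape, with the card fields reduced for brevity; the role string differs between the two versions above ("ADMIN" vs "admin"), and "ADMIN" is used here purely for illustration:

interface NavCard {
  title: string;
  href: string;
  adminOnly?: boolean;
}

// Admin-only cards are appended via a conditional spread, so the list stays a
// single expression and non-admin users simply never see those entries.
function buildNavigationCards(role: string | undefined): NavCard[] {
  return [
    { title: "Analytics Overview", href: "/dashboard/overview" },
    { title: "Session Browser", href: "/dashboard/sessions" },
    ...(role === "ADMIN"
      ? [
          { title: "Company Settings", href: "/dashboard/company", adminOnly: true },
          { title: "User Management", href: "/dashboard/users", adminOnly: true },
        ]
      : []),
  ];
}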
View File
@ -4,7 +4,7 @@ import { useEffect, useState } from "react";
import { useParams, useRouter } from "next/navigation"; // Import useRouter import { useParams, useRouter } from "next/navigation"; // Import useRouter
import { useSession } from "next-auth/react"; // Import useSession import { useSession } from "next-auth/react"; // Import useSession
import SessionDetails from "../../../../components/SessionDetails"; import SessionDetails from "../../../../components/SessionDetails";
import TranscriptViewer from "../../../../components/TranscriptViewer";
import MessageViewer from "../../../../components/MessageViewer"; import MessageViewer from "../../../../components/MessageViewer";
import { ChatSession } from "../../../../lib/types"; import { ChatSession } from "../../../../lib/types";
import Link from "next/link"; import Link from "next/link";
@ -108,7 +108,7 @@ export default function SessionViewPage() {
} }
return ( return (
<div className="min-h-screen bg-linear-to-br from-slate-50 to-sky-100 p-4 md:p-6"> <div className="min-h-screen bg-gradient-to-br from-slate-50 to-sky-100 p-4 md:p-6">
<div className="max-w-4xl mx-auto"> <div className="max-w-4xl mx-auto">
<div className="mb-6"> <div className="mb-6">
<Link <Link
View File
@ -37,7 +37,7 @@ export default function DashboardSettings({
else setMessage("Failed."); else setMessage("Failed.");
} }
if (session.user.role !== "ADMIN") return null; if (session.user.role !== "admin") return null;
return ( return (
<div className="bg-white p-6 rounded-xl shadow mb-6"> <div className="bg-white p-6 rounded-xl shadow mb-6">
View File
@ -34,7 +34,7 @@ export default function UserManagement({ session }: UserManagementProps) {
else setMsg("Failed."); else setMsg("Failed.");
} }
if (session.user.role !== "ADMIN") return null; if (session.user.role !== "admin") return null;
return ( return (
<div className="bg-white p-6 rounded-xl shadow mb-6"> <div className="bg-white p-6 rounded-xl shadow mb-6">
@ -52,8 +52,8 @@ export default function UserManagement({ session }: UserManagementProps) {
onChange={(e) => setRole(e.target.value)} onChange={(e) => setRole(e.target.value)}
> >
<option value="user">User</option> <option value="user">User</option>
<option value="ADMIN">Admin</option> <option value="admin">Admin</option>
<option value="AUDITOR">Auditor</option> <option value="auditor">Auditor</option>
</select> </select>
<button <button
className="bg-blue-600 text-white rounded px-4 py-2 sm:py-0 w-full sm:w-auto" className="bg-blue-600 text-white rounded px-4 py-2 sm:py-0 w-full sm:w-auto"
View File
@ -69,7 +69,7 @@ export default function UserManagementPage() {
} }
// Check for admin access // Check for admin access
if (session?.user?.role !== "ADMIN") { if (session?.user?.role !== "admin") {
return ( return (
<div className="text-center py-10 bg-white rounded-xl shadow p-6"> <div className="text-center py-10 bg-white rounded-xl shadow p-6">
<h2 className="font-bold text-xl text-red-600 mb-2">Access Denied</h2> <h2 className="font-bold text-xl text-red-600 mb-2">Access Denied</h2>
@ -124,8 +124,8 @@ export default function UserManagementPage() {
onChange={(e) => setRole(e.target.value)} onChange={(e) => setRole(e.target.value)}
> >
<option value="user">User</option> <option value="user">User</option>
<option value="ADMIN">Admin</option> <option value="admin">Admin</option>
<option value="AUDITOR">Auditor</option> <option value="auditor">Auditor</option>
</select> </select>
</div> </div>
@ -183,9 +183,9 @@ export default function UserManagementPage() {
<td className="px-6 py-4 whitespace-nowrap text-sm text-gray-500"> <td className="px-6 py-4 whitespace-nowrap text-sm text-gray-500">
<span <span
className={`px-2 inline-flex text-xs leading-5 font-semibold rounded-full ${ className={`px-2 inline-flex text-xs leading-5 font-semibold rounded-full ${
user.role === "ADMIN" user.role === "admin"
? "bg-purple-100 text-purple-800" ? "bg-purple-100 text-purple-800"
: user.role === "AUDITOR" : user.role === "auditor"
? "bg-blue-100 text-blue-800" ? "bg-blue-100 text-blue-800"
: "bg-green-100 text-green-800" : "bg-green-100 text-green-800"
}`} }`}
View File
@ -43,71 +43,71 @@
:root { :root {
--radius: 0.625rem; --radius: 0.625rem;
--background: 255 255 255; --background: oklch(1 0 0);
--foreground: 15 23 42; --foreground: oklch(0.145 0 0);
--card: 255 255 255; --card: oklch(1 0 0);
--card-foreground: 15 23 42; --card-foreground: oklch(0.145 0 0);
--popover: 255 255 255; --popover: oklch(1 0 0);
--popover-foreground: 15 23 42; --popover-foreground: oklch(0.145 0 0);
--primary: 0 123 255; --primary: oklch(0.205 0 0);
--primary-foreground: 255 255 255; --primary-foreground: oklch(0.985 0 0);
--secondary: 245 245 245; --secondary: oklch(0.97 0 0);
--secondary-foreground: 51 51 51; --secondary-foreground: oklch(0.205 0 0);
--muted: 248 250 252; --muted: oklch(0.97 0 0);
--muted-foreground: 100 116 139; --muted-foreground: oklch(0.556 0 0);
--accent: 245 245 245; --accent: oklch(0.97 0 0);
--accent-foreground: 51 51 51; --accent-foreground: oklch(0.205 0 0);
--destructive: 239 68 68; --destructive: oklch(0.577 0.245 27.325);
--border: 229 231 235; --border: oklch(0.922 0 0);
--input: 229 231 235; --input: oklch(0.922 0 0);
--ring: 0 123 255; --ring: oklch(0.708 0 0);
--chart-1: 0 123 255; --chart-1: oklch(0.646 0.222 41.116);
--chart-2: 255 20 147; --chart-2: oklch(0.6 0.118 184.704);
--chart-3: 50 205 50; --chart-3: oklch(0.398 0.07 227.392);
--chart-4: 138 43 226; --chart-4: oklch(0.828 0.189 84.429);
--chart-5: 255 215 0; --chart-5: oklch(0.769 0.188 70.08);
--sidebar: 248 250 252; --sidebar: oklch(0.985 0 0);
--sidebar-foreground: 15 23 42; --sidebar-foreground: oklch(0.145 0 0);
--sidebar-primary: 0 123 255; --sidebar-primary: oklch(0.205 0 0);
--sidebar-primary-foreground: 255 255 255; --sidebar-primary-foreground: oklch(0.985 0 0);
--sidebar-accent: 245 245 245; --sidebar-accent: oklch(0.97 0 0);
--sidebar-accent-foreground: 51 51 51; --sidebar-accent-foreground: oklch(0.205 0 0);
--sidebar-border: 229 231 235; --sidebar-border: oklch(0.922 0 0);
--sidebar-ring: 0 123 255; --sidebar-ring: oklch(0.708 0 0);
} }
.dark { .dark {
--background: 15 23 42; --background: oklch(0.145 0 0);
--foreground: 248 250 252; --foreground: oklch(0.985 0 0);
--card: 30 41 59; --card: oklch(0.205 0 0);
--card-foreground: 248 250 252; --card-foreground: oklch(0.985 0 0);
--popover: 30 41 59; --popover: oklch(0.205 0 0);
--popover-foreground: 248 250 252; --popover-foreground: oklch(0.985 0 0);
--primary: 59 130 246; --primary: oklch(0.922 0 0);
--primary-foreground: 15 23 42; --primary-foreground: oklch(0.205 0 0);
--secondary: 51 65 85; --secondary: oklch(0.269 0 0);
--secondary-foreground: 248 250 252; --secondary-foreground: oklch(0.985 0 0);
--muted: 51 65 85; --muted: oklch(0.269 0 0);
--muted-foreground: 148 163 184; --muted-foreground: oklch(0.708 0 0);
--accent: 51 65 85; --accent: oklch(0.269 0 0);
--accent-foreground: 248 250 252; --accent-foreground: oklch(0.985 0 0);
--destructive: 248 113 113; --destructive: oklch(0.704 0.191 22.216);
--border: 51 65 85; --border: oklch(1 0 0 / 10%);
--input: 51 65 85; --input: oklch(1 0 0 / 15%);
--ring: 59 130 246; --ring: oklch(0.556 0 0);
--chart-1: 59 130 246; --chart-1: oklch(0.488 0.243 264.376);
--chart-2: 236 72 153; --chart-2: oklch(0.696 0.17 162.48);
--chart-3: 34 197 94; --chart-3: oklch(0.769 0.188 70.08);
--chart-4: 147 51 234; --chart-4: oklch(0.627 0.265 303.9);
--chart-5: 251 191 36; --chart-5: oklch(0.645 0.246 16.439);
--sidebar: 30 41 59; --sidebar: oklch(0.205 0 0);
--sidebar-foreground: 248 250 252; --sidebar-foreground: oklch(0.985 0 0);
--sidebar-primary: 59 130 246; --sidebar-primary: oklch(0.488 0.243 264.376);
--sidebar-primary-foreground: 248 250 252; --sidebar-primary-foreground: oklch(0.985 0 0);
--sidebar-accent: 51 65 85; --sidebar-accent: oklch(0.269 0 0);
--sidebar-accent-foreground: 248 250 252; --sidebar-accent-foreground: oklch(0.985 0 0);
--sidebar-border: 51 65 85; --sidebar-border: oklch(1 0 0 / 10%);
--sidebar-ring: 59 130 246; --sidebar-ring: oklch(0.556 0 0);
} }
@layer base { @layer base {
@ -115,25 +115,6 @@
@apply border-border outline-ring/50; @apply border-border outline-ring/50;
} }
body { body {
@apply bg-gray-50 text-gray-900; @apply bg-background text-foreground;
}
/* Apple-style scrollbars */
::-webkit-scrollbar {
width: 8px;
height: 8px;
}
::-webkit-scrollbar-track {
background: transparent;
}
::-webkit-scrollbar-thumb {
background: rgba(0, 0, 0, 0.2);
border-radius: 4px;
}
::-webkit-scrollbar-thumb:hover {
background: rgba(0, 0, 0, 0.3);
} }
} }
View File
@ -7,9 +7,9 @@ export function Providers({ children }: { children: ReactNode }) {
// Including error handling and refetch interval for better user experience // Including error handling and refetch interval for better user experience
return ( return (
<SessionProvider <SessionProvider
// Re-fetch session every 30 minutes (reduced from 10) // Re-fetch session every 10 minutes
refetchInterval={30 * 60} refetchInterval={10 * 60}
refetchOnWindowFocus={false} refetchOnWindowFocus={true}
> >
{children} {children}
</SessionProvider> </SessionProvider>
View File
@ -7,7 +7,7 @@ export default function RegisterPage() {
const [company, setCompany] = useState<string>(""); const [company, setCompany] = useState<string>("");
const [password, setPassword] = useState<string>(""); const [password, setPassword] = useState<string>("");
const [csvUrl, setCsvUrl] = useState<string>(""); const [csvUrl, setCsvUrl] = useState<string>("");
const [role, setRole] = useState<string>("ADMIN"); // Default to ADMIN for company registration const [role, setRole] = useState<string>("admin"); // Default to admin for company registration
const [error, setError] = useState<string>(""); const [error, setError] = useState<string>("");
const router = useRouter(); const router = useRouter();
@ -66,7 +66,7 @@ export default function RegisterPage() {
> >
<option value="admin">Admin</option> <option value="admin">Admin</option>
<option value="user">User</option> <option value="user">User</option>
<option value="AUDITOR">Auditor</option> <option value="auditor">Auditor</option>
</select> </select>
<button className="bg-blue-600 text-white rounded py-2" type="submit"> <button className="bg-blue-600 text-white rounded py-2" type="submit">
Register & Continue Register & Continue
View File
@ -1,78 +0,0 @@
import { PrismaClient } from '@prisma/client';
import { ProcessingStatusManager } from './lib/processingStatusManager';
const prisma = new PrismaClient();
async function checkRefactoredPipelineStatus() {
try {
console.log('=== REFACTORED PIPELINE STATUS ===\n');
// Get pipeline status using the new system
const pipelineStatus = await ProcessingStatusManager.getPipelineStatus();
console.log(`Total Sessions: ${pipelineStatus.totalSessions}\n`);
// Display status for each stage
const stages = ['CSV_IMPORT', 'TRANSCRIPT_FETCH', 'SESSION_CREATION', 'AI_ANALYSIS', 'QUESTION_EXTRACTION'];
for (const stage of stages) {
console.log(`${stage}:`);
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const inProgress = stageData.IN_PROGRESS || 0;
const completed = stageData.COMPLETED || 0;
const failed = stageData.FAILED || 0;
const skipped = stageData.SKIPPED || 0;
console.log(` PENDING: ${pending}`);
console.log(` IN_PROGRESS: ${inProgress}`);
console.log(` COMPLETED: ${completed}`);
console.log(` FAILED: ${failed}`);
console.log(` SKIPPED: ${skipped}`);
console.log('');
}
// Show what needs processing
console.log('=== WHAT NEEDS PROCESSING ===');
for (const stage of stages) {
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const failed = stageData.FAILED || 0;
if (pending > 0 || failed > 0) {
console.log(`${stage}: ${pending} pending, ${failed} failed`);
}
}
// Show failed sessions if any
const failedSessions = await ProcessingStatusManager.getFailedSessions();
if (failedSessions.length > 0) {
console.log('\n=== FAILED SESSIONS ===');
failedSessions.slice(0, 5).forEach(failure => {
console.log(` ${failure.session.import?.externalSessionId || failure.sessionId}: ${failure.stage} - ${failure.errorMessage}`);
});
if (failedSessions.length > 5) {
console.log(` ... and ${failedSessions.length - 5} more failed sessions`);
}
}
// Show sessions ready for AI processing
const readyForAI = await ProcessingStatusManager.getSessionsNeedingProcessing('AI_ANALYSIS', 5);
if (readyForAI.length > 0) {
console.log('\n=== SESSIONS READY FOR AI PROCESSING ===');
readyForAI.forEach(status => {
console.log(` ${status.session.import?.externalSessionId || status.sessionId} (created: ${status.session.createdAt})`);
});
}
} catch (error) {
console.error('Error checking pipeline status:', error);
} finally {
await prisma.$disconnect();
}
}
checkRefactoredPipelineStatus();
View File
@ -4,7 +4,7 @@
"rsc": true, "rsc": true,
"tsx": true, "tsx": true,
"tailwind": { "tailwind": {
"config": "", "config": "tailwind.config.js",
"css": "app/globals.css", "css": "app/globals.css",
"baseColor": "neutral", "baseColor": "neutral",
"cssVariables": true, "cssVariables": true,
@ -18,4 +18,4 @@
"hooks": "@/hooks" "hooks": "@/hooks"
}, },
"iconLibrary": "lucide" "iconLibrary": "lucide"
} }
View File
@ -128,9 +128,9 @@ export function SentimentChart({ sentimentData }: SentimentChartProps) {
sentimentData.negative, sentimentData.negative,
], ],
backgroundColor: [ backgroundColor: [
"rgba(37, 99, 235, 0.8)", // blue (primary) "rgba(34, 197, 94, 0.8)", // green
"rgba(107, 114, 128, 0.8)", // gray "rgba(249, 115, 22, 0.8)", // orange
"rgba(236, 72, 153, 0.8)", // pink "rgba(239, 68, 68, 0.8)", // red
], ],
borderWidth: 1, borderWidth: 1,
}, },
@ -196,12 +196,12 @@ export function LanguagePieChart({ languages }: LanguagePieChartProps) {
{ {
data, data,
backgroundColor: [ backgroundColor: [
"rgba(37, 99, 235, 0.8)", // blue (primary) "rgba(59, 130, 246, 0.8)",
"rgba(107, 114, 128, 0.8)", // gray "rgba(16, 185, 129, 0.8)",
"rgba(236, 72, 153, 0.8)", // pink "rgba(249, 115, 22, 0.8)",
"rgba(34, 197, 94, 0.8)", // lime green "rgba(236, 72, 153, 0.8)",
"rgba(168, 85, 247, 0.8)", // purple "rgba(139, 92, 246, 0.8)",
"rgba(251, 191, 36, 0.8)", // yellow "rgba(107, 114, 128, 0.8)",
], ],
borderWidth: 1, borderWidth: 1,
}, },
View File
@ -1,6 +1,6 @@
"use client"; "use client";
import { useState, useEffect, useRef, memo } from "react"; import { useState, useEffect } from "react";
interface DateRangePickerProps { interface DateRangePickerProps {
minDate: string; minDate: string;
@ -10,7 +10,7 @@ interface DateRangePickerProps {
initialEndDate?: string; initialEndDate?: string;
} }
function DateRangePicker({ export default function DateRangePicker({
minDate, minDate,
maxDate, maxDate,
onDateRangeChange, onDateRangeChange,
@ -19,27 +19,11 @@ function DateRangePicker({
}: DateRangePickerProps) { }: DateRangePickerProps) {
const [startDate, setStartDate] = useState(initialStartDate || minDate); const [startDate, setStartDate] = useState(initialStartDate || minDate);
const [endDate, setEndDate] = useState(initialEndDate || maxDate); const [endDate, setEndDate] = useState(initialEndDate || maxDate);
const isInitializedRef = useRef(false);
useEffect(() => { useEffect(() => {
// Update local state when props change (e.g., when date range is loaded from API) // Notify parent component when dates change
if (initialStartDate && initialStartDate !== startDate) { onDateRangeChange(startDate, endDate);
setStartDate(initialStartDate); }, [startDate, endDate, onDateRangeChange]);
}
if (initialEndDate && initialEndDate !== endDate) {
setEndDate(initialEndDate);
}
}, [initialStartDate, initialEndDate]);
useEffect(() => {
// Only notify parent component after initial render and when dates actually change
// This prevents the infinite loop by not including onDateRangeChange in dependencies
if (isInitializedRef.current) {
onDateRangeChange(startDate, endDate);
} else {
isInitializedRef.current = true;
}
}, [startDate, endDate]);
const handleStartDateChange = (newStartDate: string) => { const handleStartDateChange = (newStartDate: string) => {
// Ensure start date is not before min date // Ensure start date is not before min date
@ -79,10 +63,11 @@ function DateRangePicker({
const setLast30Days = () => { const setLast30Days = () => {
const thirtyDaysAgo = new Date(); const thirtyDaysAgo = new Date();
thirtyDaysAgo.setDate(thirtyDaysAgo.getDate() - 30); thirtyDaysAgo.setDate(thirtyDaysAgo.getDate() - 30);
const thirtyDaysAgoStr = thirtyDaysAgo.toISOString().split('T')[0]; const thirtyDaysAgoStr = thirtyDaysAgo.toISOString().split("T")[0];
// Use the later of 30 days ago or minDate // Use the later of 30 days ago or minDate
const newStartDate = thirtyDaysAgoStr > minDate ? thirtyDaysAgoStr : minDate; const newStartDate =
thirtyDaysAgoStr > minDate ? thirtyDaysAgoStr : minDate;
setStartDate(newStartDate); setStartDate(newStartDate);
setEndDate(maxDate); setEndDate(maxDate);
}; };
@ -90,7 +75,7 @@ function DateRangePicker({
const setLast7Days = () => { const setLast7Days = () => {
const sevenDaysAgo = new Date(); const sevenDaysAgo = new Date();
sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7); sevenDaysAgo.setDate(sevenDaysAgo.getDate() - 7);
const sevenDaysAgoStr = sevenDaysAgo.toISOString().split('T')[0]; const sevenDaysAgoStr = sevenDaysAgo.toISOString().split("T")[0];
// Use the later of 7 days ago or minDate // Use the later of 7 days ago or minDate
const newStartDate = sevenDaysAgoStr > minDate ? sevenDaysAgoStr : minDate; const newStartDate = sevenDaysAgoStr > minDate ? sevenDaysAgoStr : minDate;
@ -162,11 +147,9 @@ function DateRangePicker({
</div> </div>
<div className="mt-2 text-xs text-gray-500"> <div className="mt-2 text-xs text-gray-500">
Available data: {new Date(minDate).toLocaleDateString()} - {new Date(maxDate).toLocaleDateString()} Available data: {new Date(minDate).toLocaleDateString()} -{" "}
{new Date(maxDate).toLocaleDateString()}
</div> </div>
</div> </div>
); );
} }
// Export memoized component as default
export default memo(DateRangePicker);
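The rewritten DateRangePicker above (left column) syncs its local state from initialStartDate/initialEndDate and uses a ref so the parent callback only fires after the first render, which is what breaks the mount-time notify loop of the old version. A condensed sketch of that guard, with the markup trimmed to the two date inputs:

import { useEffect, useRef, useState } from "react";

interface DateRangeSketchProps {
  minDate: string;
  maxDate: string;
  initialStartDate?: string;
  initialEndDate?: string;
  onDateRangeChange: (start: string, end: string) => void;
}

// Condensed sketch of the guard used above; not the full component.
export default function DateRangeSketch({
  minDate,
  maxDate,
  initialStartDate,
  initialEndDate,
  onDateRangeChange,
}: DateRangeSketchProps) {
  const [startDate, setStartDate] = useState(initialStartDate || minDate);
  const [endDate, setEndDate] = useState(initialEndDate || maxDate);
  const isInitializedRef = useRef(false);

  useEffect(() => {
    // Skip the very first run so mounting does not immediately notify the
    // parent and restart the fetch/render cycle. onDateRangeChange is kept
    // out of the dependencies for the same reason.
    if (isInitializedRef.current) {
      onDateRangeChange(startDate, endDate);
    } else {
      isInitializedRef.current = true;
    }
  }, [startDate, endDate]);

  return (
    <div className="flex gap-2">
      <input
        type="date"
        value={startDate}
        min={minDate}
        max={endDate}
        onChange={(e) => setStartDate(e.target.value)}
      />
      <input
        type="date"
        value={endDate}
        min={startDate}
        max={maxDate}
        onChange={(e) => setEndDate(e.target.value)}
      />
    </div>
  );
}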
View File
@ -48,7 +48,7 @@ const getCountryCoordinates = (): Record<string, [number, number]> => {
BG: [42.7339, 25.4858], BG: [42.7339, 25.4858],
HR: [45.1, 15.2], HR: [45.1, 15.2],
SK: [48.669, 19.699], SK: [48.669, 19.699],
SI: [46.1512, 14.9955] SI: [46.1512, 14.9955],
}; };
// This function now primarily returns fallbacks. // This function now primarily returns fallbacks.
// The actual fetching using @rapideditor/country-coder will be in the component's useEffect. // The actual fetching using @rapideditor/country-coder will be in the component's useEffect.
View File
@ -49,7 +49,7 @@ export default function MessageViewer({ messages }: MessageViewerProps) {
{message.role} {message.role}
</span> </span>
<span className="text-xs opacity-75 ml-2"> <span className="text-xs opacity-75 ml-2">
{message.timestamp ? new Date(message.timestamp).toLocaleTimeString() : 'No timestamp'} {new Date(message.timestamp).toLocaleTimeString()}
</span> </span>
</div> </div>
<div className="text-sm whitespace-pre-wrap"> <div className="text-sm whitespace-pre-wrap">
@ -63,14 +63,11 @@ export default function MessageViewer({ messages }: MessageViewerProps) {
<div className="mt-4 pt-3 border-t text-sm text-gray-500"> <div className="mt-4 pt-3 border-t text-sm text-gray-500">
<div className="flex justify-between"> <div className="flex justify-between">
<span> <span>
First message: {messages[0].timestamp ? new Date(messages[0].timestamp).toLocaleString() : 'No timestamp'} First message: {new Date(messages[0].timestamp).toLocaleString()}
</span> </span>
<span> <span>
Last message:{" "} Last message:{" "}
{(() => { {new Date(messages[messages.length - 1].timestamp).toLocaleString()}
const lastMessage = messages[messages.length - 1];
return lastMessage.timestamp ? new Date(lastMessage.timestamp).toLocaleString() : 'No timestamp';
})()}
</span> </span>
</div> </div>
</div> </div>
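The MessageViewer hunk above adds null checks before formatting message timestamps. The same guard as a small helper, assuming a timestamp field that may be missing or unparsable:

// Returns a readable time (or full date-time) and degrades gracefully when the
// timestamp is absent or cannot be parsed.
function formatTimestamp(timestamp: string | null | undefined, withDate = false): string {
  if (!timestamp) return "No timestamp";
  const parsed = new Date(timestamp);
  if (Number.isNaN(parsed.getTime())) return "No timestamp";
  return withDate ? parsed.toLocaleString() : parsed.toLocaleTimeString();
}

// e.g. formatTimestamp(messages[0]?.timestamp, true) for the "First message" footer.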
View File
@ -1,15 +1,10 @@
"use client"; "use client";
import { import { useRef, useEffect } from "react";
BarChart, import Chart from "chart.js/auto";
Bar, import annotationPlugin from "chartjs-plugin-annotation";
XAxis,
YAxis, Chart.register(annotationPlugin);
CartesianGrid,
Tooltip,
ResponsiveContainer,
ReferenceLine,
} from "recharts";
interface ResponseTimeDistributionProps { interface ResponseTimeDistributionProps {
data: number[]; data: number[];
@ -17,145 +12,114 @@ interface ResponseTimeDistributionProps {
targetResponseTime?: number; targetResponseTime?: number;
} }
const CustomTooltip = ({ active, payload, label }: any) => {
if (active && payload && payload.length) {
return (
<div className="rounded-lg border border-gray-200 bg-white p-3 shadow-md">
<p className="text-sm font-medium text-gray-900">{label}</p>
<p className="text-sm text-gray-600">
<span className="font-medium text-gray-900">
{payload[0].value}
</span>{" "}
responses
</p>
</div>
);
}
return null;
};
export default function ResponseTimeDistribution({ export default function ResponseTimeDistribution({
data, data,
average, average,
targetResponseTime, targetResponseTime,
}: ResponseTimeDistributionProps) { }: ResponseTimeDistributionProps) {
if (!data || !data.length) { const ref = useRef<HTMLCanvasElement | null>(null);
return (
<div className="flex items-center justify-center h-64 text-muted-foreground">
No response time data available
</div>
);
}
// Create bins for the histogram (0-1s, 1-2s, 2-3s, etc.) useEffect(() => {
const maxTime = Math.ceil(Math.max(...data)); if (!ref.current || !data || !data.length) return;
const bins = Array(Math.min(maxTime + 1, 10)).fill(0);
// Count responses in each bin const ctx = ref.current.getContext("2d");
data.forEach((time) => { if (!ctx) return;
const binIndex = Math.min(Math.floor(time), bins.length - 1);
bins[binIndex]++;
});
// Create chart data // Create bins for the histogram (0-1s, 1-2s, 2-3s, etc.)
const chartData = bins.map((count, i) => { const maxTime = Math.ceil(Math.max(...data));
let label; const bins = Array(Math.min(maxTime + 1, 10)).fill(0);
if (i === bins.length - 1 && bins.length < maxTime + 1) {
label = `${i}+ sec`;
} else {
label = `${i}-${i + 1} sec`;
}
// Determine color based on response time using cohesive palette // Count responses in each bin
let color; data.forEach((time) => {
if (i <= 2) color = "rgb(37, 99, 235)"; // Blue for fast (primary color) const binIndex = Math.min(Math.floor(time), bins.length - 1);
else if (i <= 5) color = "rgb(107, 114, 128)"; // Gray for medium bins[binIndex]++;
else color = "rgb(236, 72, 153)"; // Pink for slow });
return { // Create labels for each bin
name: label, const labels = bins.map((_, i) => {
value: count, if (i === bins.length - 1 && bins.length < maxTime + 1) {
color, return `${i}+ seconds`;
}; }
}); return `${i}-${i + 1} seconds`;
});
return ( const chart = new Chart(ctx, {
<div className="h-64"> type: "bar",
<ResponsiveContainer width="100%" height="100%"> data: {
<BarChart data={chartData} margin={{ top: 20, right: 30, left: 20, bottom: 5 }}> labels,
<CartesianGrid datasets: [
strokeDasharray="3 3" {
stroke="rgb(229, 231, 235)" label: "Responses",
strokeOpacity={0.5} data: bins,
/> backgroundColor: bins.map((_, i) => {
<XAxis // Green for fast, yellow for medium, red for slow
dataKey="name" if (i <= 2) return "rgba(34, 197, 94, 0.7)"; // Green
stroke="rgb(100, 116, 139)" if (i <= 5) return "rgba(250, 204, 21, 0.7)"; // Yellow
fontSize={12} return "rgba(239, 68, 68, 0.7)"; // Red
tickLine={false} }),
axisLine={false} borderWidth: 1,
/> },
<YAxis ],
stroke="rgb(100, 116, 139)" },
fontSize={12} options: {
tickLine={false} responsive: true,
axisLine={false} plugins: {
label={{ legend: { display: false },
value: 'Number of Responses', annotation: {
angle: -90, annotations: {
position: 'insideLeft', averageLine: {
style: { textAnchor: 'middle', fill: 'rgb(100, 116, 139)' } type: "line",
}} yMin: 0,
/> yMax: Math.max(...bins),
<Tooltip content={<CustomTooltip />} /> xMin: average,
xMax: average,
<Bar borderColor: "rgba(75, 192, 192, 1)",
dataKey="value" borderWidth: 2,
radius={[4, 4, 0, 0]} label: {
fill="hsl(var(--chart-1))" display: true,
> content: "Avg: " + average.toFixed(1) + "s",
{chartData.map((entry, index) => ( position: "start",
<Bar key={`cell-${index}`} fill={entry.color} /> },
))} },
</Bar> targetLine: targetResponseTime
? {
type: "line",
yMin: 0,
yMax: Math.max(...bins),
xMin: targetResponseTime,
xMax: targetResponseTime,
borderColor: "rgba(75, 192, 192, 0.7)",
borderWidth: 2,
label: {
display: true,
content: "Target",
position: "end",
},
}
: undefined,
},
},
},
scales: {
y: {
beginAtZero: true,
title: {
display: true,
text: "Number of Responses",
},
},
x: {
title: {
display: true,
text: "Response Time",
},
},
},
},
});
{/* Average line */} return () => chart.destroy();
<ReferenceLine }, [data, average, targetResponseTime]);
x={Math.floor(average)}
stroke="rgb(0, 123, 255)"
strokeWidth={2}
strokeDasharray="5 5"
label={{
value: `Avg: ${average.toFixed(1)}s`,
position: "top" as const,
style: {
fill: "rgb(0, 123, 255)",
fontSize: "12px",
fontWeight: "500"
}
}}
/>
{/* Target line (if provided) */} return <canvas ref={ref} height={180} />;
{targetResponseTime && (
<ReferenceLine
x={Math.floor(targetResponseTime)}
stroke="rgb(255, 20, 147)"
strokeWidth={2}
strokeDasharray="3 3"
label={{
value: `Target: ${targetResponseTime}s`,
position: "top" as const,
style: {
fill: "rgb(255, 20, 147)",
fontSize: "12px",
fontWeight: "500"
}
}}
/>
)}
</BarChart>
</ResponsiveContainer>
</div>
);
} }
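Both the Recharts and the Chart.js versions of ResponseTimeDistribution above share the same binning step: response times go into one-second buckets, capped at roughly ten, with the last bucket collecting everything slower. A framework-free sketch of that step:

// Bucket response times (in seconds) into 1-second bins, capping the bin count
// so very slow outliers all land in the final "N+" bucket.
function binResponseTimes(times: number[], maxBins = 10): { label: string; count: number }[] {
  if (times.length === 0) return [];

  const maxTime = Math.ceil(Math.max(...times));
  const binCount = Math.min(maxTime + 1, maxBins);
  const bins = new Array<number>(binCount).fill(0);

  for (const time of times) {
    const index = Math.min(Math.floor(time), binCount - 1);
    bins[index]++;
  }

  return bins.map((count, i) => ({
    label:
      i === binCount - 1 && binCount < maxTime + 1 ? `${i}+ sec` : `${i}-${i + 1} sec`,
    count,
  }));
}

// Example: binResponseTimes([0.4, 1.2, 1.9, 3.5, 12]) yields counts for
// "0-1 sec", "1-2 sec", ..., with 12s falling into the final "9+ sec" bucket.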
View File
@ -70,17 +70,39 @@ export default function SessionDetails({ session }: SessionDetailsProps) {
{session.sentiment !== null && session.sentiment !== undefined && ( {session.sentiment !== null && session.sentiment !== undefined && (
<div className="flex justify-between border-b pb-2"> <div className="flex justify-between border-b pb-2">
<span className="text-gray-600">Sentiment:</span> <span className="text-gray-600">Sentiment Score:</span>
<span <span
className={`font-medium capitalize ${ className={`font-medium ${
session.sentiment === "POSITIVE" session.sentiment > 0.3
? "text-green-500" ? "text-green-500"
: session.sentiment === "NEGATIVE" : session.sentiment < -0.3
? "text-red-500" ? "text-red-500"
: "text-orange-500" : "text-orange-500"
}`} }`}
> >
{session.sentiment.toLowerCase()} {session.sentiment > 0.3
? "Positive"
: session.sentiment < -0.3
? "Negative"
: "Neutral"}{" "}
({session.sentiment.toFixed(2)})
</span>
</div>
)}
{session.sentimentCategory && (
<div className="flex justify-between border-b pb-2">
<span className="text-gray-600">AI Sentiment:</span>
<span
className={`font-medium capitalize ${
session.sentimentCategory === "positive"
? "text-green-500"
: session.sentimentCategory === "negative"
? "text-red-500"
: "text-orange-500"
}`}
>
{session.sentimentCategory}
</span> </span>
</div> </div>
)} )}
@ -90,6 +112,19 @@ export default function SessionDetails({ session }: SessionDetailsProps) {
<span className="font-medium">{session.messagesSent || 0}</span> <span className="font-medium">{session.messagesSent || 0}</span>
</div> </div>
{typeof session.tokens === "number" && (
<div className="flex justify-between border-b pb-2">
<span className="text-gray-600">Tokens:</span>
<span className="font-medium">{session.tokens}</span>
</div>
)}
{typeof session.tokensEur === "number" && (
<div className="flex justify-between border-b pb-2">
<span className="text-gray-600">Cost:</span>
<span className="font-medium">{session.tokensEur.toFixed(4)}</span>
</div>
)}
{session.avgResponseTime !== null && {session.avgResponseTime !== null &&
session.avgResponseTime !== undefined && ( session.avgResponseTime !== undefined && (
@ -132,12 +167,22 @@ export default function SessionDetails({ session }: SessionDetailsProps) {
</div> </div>
)} )}
{session.processed !== null && session.processed !== undefined && (
<div className="flex justify-between border-b pb-2">
<span className="text-gray-600">AI Processed:</span>
<span
className={`font-medium ${session.processed ? "text-green-500" : "text-gray-500"}`}
>
{session.processed ? "Yes" : "No"}
</span>
</div>
)}
{session.initialMsg && ( {session.initialMsg && (
<div className="border-b pb-2"> <div className="border-b pb-2">
<span className="text-gray-600 block mb-1">Initial Message:</span> <span className="text-gray-600 block mb-1">Initial Message:</span>
<div className="bg-gray-50 p-2 rounded text-sm italic"> <div className="bg-gray-50 p-2 rounded text-sm italic">
&quot;{session.initialMsg}&quot; "{session.initialMsg}"
</div> </div>
</div> </div>
)} )}
@ -151,6 +196,30 @@ export default function SessionDetails({ session }: SessionDetailsProps) {
</div> </div>
)} )}
{session.questions && (
<div className="border-b pb-2">
<span className="text-gray-600 block mb-1">Questions Asked:</span>
<div className="bg-yellow-50 p-2 rounded text-sm">
{(() => {
try {
const questions = JSON.parse(session.questions);
if (Array.isArray(questions) && questions.length > 0) {
return (
<ul className="list-disc list-inside space-y-1">
{questions.map((question: string, index: number) => (
<li key={index}>{question}</li>
))}
</ul>
);
}
return "No questions identified";
} catch {
return session.questions;
}
})()}
</div>
</div>
)}
{session.fullTranscriptUrl && ( {session.fullTranscriptUrl && (
<div className="flex justify-between pt-2"> <div className="flex justify-between pt-2">

View File

@ -167,7 +167,7 @@ const NavItem: React.FC<NavItemProps> = ({
} }
}} }}
> >
<span className={`shrink-0 ${isExpanded ? "mr-3" : "mx-auto"}`}> <span className={`flex-shrink-0 ${isExpanded ? "mr-3" : "mx-auto"}`}>
{icon} {icon}
</span> </span>
{isExpanded ? ( {isExpanded ? (
@ -334,7 +334,7 @@ export default function Sidebar({
isExpanded ? "" : "justify-center" isExpanded ? "" : "justify-center"
}`} }`}
> >
<span className={`shrink-0 ${isExpanded ? "mr-3" : ""}`}> <span className={`flex-shrink-0 ${isExpanded ? "mr-3" : ""}`}>
<LogoutIcon /> <LogoutIcon />
</span> </span>
{isExpanded ? ( {isExpanded ? (

View File

@ -1,19 +1,22 @@
'use client'; "use client";
import React from 'react'; import React from "react";
import { TopQuestion } from '../lib/types'; import { TopQuestion } from "../lib/types";
interface TopQuestionsChartProps { interface TopQuestionsChartProps {
data: TopQuestion[]; data: TopQuestion[];
title?: string; title?: string;
} }
export default function TopQuestionsChart({ data, title = "Top 5 Asked Questions" }: TopQuestionsChartProps) { export default function TopQuestionsChart({
data,
title = "Top 5 Asked Questions",
}: TopQuestionsChartProps) {
if (!data || data.length === 0) { if (!data || data.length === 0) {
return ( return (
<div className="bg-white p-6 rounded-2xl shadow-sm border border-gray-100"> <div className="bg-white p-6 rounded-lg shadow-sm border border-gray-200">
<h3 className="text-lg font-semibold text-gray-900 mb-6">{title}</h3> <h3 className="text-lg font-semibold text-gray-900 mb-4">{title}</h3>
<div className="text-center py-12 text-gray-500"> <div className="text-center py-8 text-gray-500">
No questions data available No questions data available
</div> </div>
</div> </div>
@ -21,41 +24,40 @@ export default function TopQuestionsChart({ data, title = "Top 5 Asked Questions
} }
// Find the maximum count to calculate relative bar widths // Find the maximum count to calculate relative bar widths
const maxCount = Math.max(...data.map(q => q.count)); const maxCount = Math.max(...data.map((q) => q.count));
return ( return (
<div className="bg-white p-6 rounded-2xl shadow-sm border border-gray-100"> <div className="bg-white p-6 rounded-lg shadow-sm border border-gray-200">
<h3 className="text-lg font-semibold text-gray-900 mb-6">{title}</h3> <h3 className="text-lg font-semibold text-gray-900 mb-4">{title}</h3>
<div className="space-y-6"> <div className="space-y-4">
{data.map((question, index) => { {data.map((question, index) => {
const percentage = maxCount > 0 ? (question.count / maxCount) * 100 : 0; const percentage =
maxCount > 0 ? (question.count / maxCount) * 100 : 0;
return ( return (
<div key={index} className="group"> <div key={index} className="relative">
{/* Rank and Question */} {/* Question text */}
<div className="flex items-start gap-4 mb-3"> <div className="flex justify-between items-start mb-2">
<div className="flex-shrink-0 w-8 h-8 bg-gray-100 text-gray-900 text-sm font-semibold rounded-full flex items-center justify-center"> <p className="text-sm text-gray-700 font-medium leading-tight pr-4 flex-1">
{index + 1} {question.question}
</div> </p>
<div className="flex-1 min-w-0"> <span className="text-sm font-semibold text-gray-900 bg-gray-100 px-2 py-1 rounded-md whitespace-nowrap">
<p className="text-sm font-medium text-gray-900 leading-relaxed mb-2"> {question.count}
{question.question} </span>
</p> </div>
<div className="flex items-center justify-between">
<div className="flex-1 mr-4"> {/* Progress bar */}
<div className="w-full bg-gray-100 rounded-full h-2"> <div className="w-full bg-gray-200 rounded-full h-2">
<div <div
className="bg-blue-600 h-2 rounded-full transition-all duration-500 ease-out" className="bg-blue-600 h-2 rounded-full transition-all duration-300 ease-in-out"
style={{ width: `${percentage}%` }} style={{ width: `${percentage}%` }}
/> />
</div> </div>
</div>
<span className="text-sm font-semibold text-gray-900 min-w-0"> {/* Rank indicator */}
{question.count} times <div className="absolute -left-2 top-0 w-6 h-6 bg-blue-600 text-white text-xs font-bold rounded-full flex items-center justify-center">
</span> {index + 1}
</div>
</div>
</div> </div>
</div> </div>
); );
@ -63,10 +65,10 @@ export default function TopQuestionsChart({ data, title = "Top 5 Asked Questions
</div> </div>
{/* Summary */} {/* Summary */}
<div className="mt-8 pt-6 border-t border-gray-100"> <div className="mt-6 pt-4 border-t border-gray-200">
<div className="flex justify-between items-center"> <div className="flex justify-between text-sm text-gray-600">
<span className="text-sm text-gray-600">Total questions analyzed</span> <span>Total questions analyzed</span>
<span className="text-sm font-semibold text-gray-900"> <span className="font-medium">
{data.reduce((sum, q) => sum + q.count, 0)} {data.reduce((sum, q) => sum + q.count, 0)}
</span> </span>
</div> </div>

View File

@ -22,7 +22,7 @@ export default function WelcomeBanner({ companyName }: WelcomeBannerProps) {
} }
return ( return (
<div className="bg-linear-to-r from-blue-600 to-indigo-700 text-white p-6 rounded-xl shadow-lg mb-8"> <div className="bg-gradient-to-r from-blue-600 to-indigo-700 text-white p-6 rounded-xl shadow-lg mb-8">
<div className="flex justify-between items-center"> <div className="flex justify-between items-center">
<div> <div>
<h1 className="text-3xl font-bold"> <h1 className="text-3xl font-bold">

View File

@ -1,105 +0,0 @@
"use client";
import {
BarChart,
Bar,
XAxis,
YAxis,
CartesianGrid,
Tooltip,
ResponsiveContainer,
Cell,
} from "recharts";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
interface BarChartProps {
data: Array<{ name: string; value: number; [key: string]: any }>;
title?: string;
dataKey?: string;
colors?: string[];
height?: number;
className?: string;
}
const CustomTooltip = ({ active, payload, label }: any) => {
if (active && payload && payload.length) {
return (
<div className="rounded-lg border bg-background p-3 shadow-md">
<p className="text-sm font-medium">{label}</p>
<p className="text-sm text-muted-foreground">
<span className="font-medium text-foreground">
{payload[0].value}
</span>{" "}
sessions
</p>
</div>
);
}
return null;
};
export default function ModernBarChart({
data,
title,
dataKey = "value",
colors = [
"rgb(37, 99, 235)", // Blue (primary)
"rgb(107, 114, 128)", // Gray
"rgb(236, 72, 153)", // Pink
"rgb(34, 197, 94)", // Lime green
"rgb(168, 85, 247)", // Purple
],
height = 300,
className,
}: BarChartProps) {
return (
<Card className={className}>
{title && (
<CardHeader>
<CardTitle className="text-lg font-semibold">{title}</CardTitle>
</CardHeader>
)}
<CardContent>
<ResponsiveContainer width="100%" height={height}>
<BarChart data={data} margin={{ top: 5, right: 30, left: 20, bottom: 5 }}>
<CartesianGrid
strokeDasharray="3 3"
stroke="rgb(229, 231, 235)"
strokeOpacity={0.5}
/>
<XAxis
dataKey="name"
stroke="rgb(100, 116, 139)"
fontSize={12}
tickLine={false}
axisLine={false}
angle={-45}
textAnchor="end"
height={80}
/>
<YAxis
stroke="rgb(100, 116, 139)"
fontSize={12}
tickLine={false}
axisLine={false}
/>
<Tooltip content={<CustomTooltip />} />
<Bar
dataKey={dataKey}
radius={[4, 4, 0, 0]}
className="transition-all duration-200"
>
{data.map((entry, index) => (
<Cell
key={`cell-${index}`}
fill={colors[index % colors.length]}
className="hover:opacity-80"
/>
))}
</Bar>
</BarChart>
</ResponsiveContainer>
</CardContent>
</Card>
);
}

View File

@ -1,122 +0,0 @@
"use client";
import { PieChart, Pie, Cell, ResponsiveContainer, Tooltip, Legend } from "recharts";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
interface DonutChartProps {
data: Array<{ name: string; value: number; color?: string }>;
title?: string;
centerText?: {
title: string;
value: string | number;
};
colors?: string[];
height?: number;
className?: string;
}
const CustomTooltip = ({ active, payload }: any) => {
if (active && payload && payload.length) {
const data = payload[0];
return (
<div className="rounded-lg border bg-background p-3 shadow-md">
<p className="text-sm font-medium">{data.name}</p>
<p className="text-sm text-muted-foreground">
<span className="font-medium text-foreground">
{data.value}
</span>{" "}
sessions ({((data.value / data.payload.total) * 100).toFixed(1)}%)
</p>
</div>
);
}
return null;
};
const CustomLegend = ({ payload }: any) => {
return (
<div className="flex flex-wrap justify-center gap-4 mt-4">
{payload.map((entry: any, index: number) => (
<div key={index} className="flex items-center gap-2">
<div
className="w-3 h-3 rounded-full"
style={{ backgroundColor: entry.color }}
/>
<span className="text-sm text-muted-foreground">{entry.value}</span>
</div>
))}
</div>
);
};
const CenterLabel = ({ centerText, total }: any) => {
if (!centerText) return null;
return (
<div className="absolute inset-0 flex items-center justify-center pointer-events-none">
<div className="text-center">
<p className="text-2xl font-bold">{centerText.value}</p>
<p className="text-sm text-muted-foreground">{centerText.title}</p>
</div>
</div>
);
};
export default function ModernDonutChart({
data,
title,
centerText,
colors = [
"rgb(37, 99, 235)", // Blue (primary)
"rgb(107, 114, 128)", // Gray
"rgb(236, 72, 153)", // Pink
"rgb(34, 197, 94)", // Lime green
"rgb(168, 85, 247)", // Purple
],
height = 300,
className,
}: DonutChartProps) {
const total = data.reduce((sum, item) => sum + item.value, 0);
const dataWithTotal = data.map(item => ({ ...item, total }));
return (
<Card className={className}>
{title && (
<CardHeader>
<CardTitle className="text-lg font-semibold">{title}</CardTitle>
</CardHeader>
)}
<CardContent>
<div className="relative">
<ResponsiveContainer width="100%" height={height}>
<PieChart>
<Pie
data={dataWithTotal}
cx="50%"
cy="50%"
innerRadius={60}
outerRadius={100}
paddingAngle={2}
dataKey="value"
className="transition-all duration-200"
>
{dataWithTotal.map((entry, index) => (
<Cell
key={`cell-${index}`}
fill={entry.color || colors[index % colors.length]}
className="hover:opacity-80 cursor-pointer"
stroke="white"
strokeWidth={2}
/>
))}
</Pie>
<Tooltip content={<CustomTooltip />} />
<Legend content={<CustomLegend />} />
</PieChart>
</ResponsiveContainer>
<CenterLabel centerText={centerText} total={total} />
</div>
</CardContent>
</Card>
);
}

View File

@ -1,117 +0,0 @@
"use client";
import {
LineChart,
Line,
XAxis,
YAxis,
CartesianGrid,
Tooltip,
ResponsiveContainer,
Area,
AreaChart,
} from "recharts";
import { Card, CardContent, CardHeader, CardTitle } from "@/components/ui/card";
interface LineChartProps {
data: Array<{ date: string; value: number; [key: string]: any }>;
title?: string;
dataKey?: string;
color?: string;
gradient?: boolean;
height?: number;
className?: string;
}
const CustomTooltip = ({ active, payload, label }: any) => {
if (active && payload && payload.length) {
return (
<div className="rounded-lg border bg-background p-3 shadow-md">
<p className="text-sm font-medium">{label}</p>
<p className="text-sm text-muted-foreground">
<span className="font-medium text-foreground">
{payload[0].value}
</span>{" "}
sessions
</p>
</div>
);
}
return null;
};
export default function ModernLineChart({
data,
title,
dataKey = "value",
color = "rgb(37, 99, 235)",
gradient = true,
height = 300,
className,
}: LineChartProps) {
const ChartComponent = gradient ? AreaChart : LineChart;
return (
<Card className={className}>
{title && (
<CardHeader>
<CardTitle className="text-lg font-semibold">{title}</CardTitle>
</CardHeader>
)}
<CardContent>
<ResponsiveContainer width="100%" height={height}>
<ChartComponent data={data} margin={{ top: 5, right: 30, left: 20, bottom: 5 }}>
<defs>
{gradient && (
<linearGradient id="colorGradient" x1="0" y1="0" x2="0" y2="1">
<stop offset="5%" stopColor={color} stopOpacity={0.3} />
<stop offset="95%" stopColor={color} stopOpacity={0.05} />
</linearGradient>
)}
</defs>
<CartesianGrid
strokeDasharray="3 3"
stroke="rgb(229, 231, 235)"
strokeOpacity={0.5}
/>
<XAxis
dataKey="date"
stroke="rgb(100, 116, 139)"
fontSize={12}
tickLine={false}
axisLine={false}
/>
<YAxis
stroke="rgb(100, 116, 139)"
fontSize={12}
tickLine={false}
axisLine={false}
/>
<Tooltip content={<CustomTooltip />} />
{gradient ? (
<Area
type="monotone"
dataKey={dataKey}
stroke={color}
strokeWidth={2}
fill="url(#colorGradient)"
dot={{ fill: color, strokeWidth: 2, r: 4 }}
activeDot={{ r: 6, stroke: color, strokeWidth: 2 }}
/>
) : (
<Line
type="monotone"
dataKey={dataKey}
stroke={color}
strokeWidth={2}
dot={{ fill: color, strokeWidth: 2, r: 4 }}
activeDot={{ r: 6, stroke: color, strokeWidth: 2 }}
/>
)}
</ChartComponent>
</ResponsiveContainer>
</CardContent>
</Card>
);
}

View File

@ -1,46 +0,0 @@
import * as React from "react"
import { Slot } from "@radix-ui/react-slot"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
const badgeVariants = cva(
"inline-flex items-center justify-center rounded-md border px-2 py-0.5 text-xs font-medium w-fit whitespace-nowrap shrink-0 [&>svg]:size-3 gap-1 [&>svg]:pointer-events-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive transition-[color,box-shadow] overflow-hidden",
{
variants: {
variant: {
default:
"border-transparent bg-primary text-primary-foreground [a&]:hover:bg-primary/90",
secondary:
"border-transparent bg-secondary text-secondary-foreground [a&]:hover:bg-secondary/90",
destructive:
"border-transparent bg-destructive text-white [a&]:hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60",
outline:
"text-foreground [a&]:hover:bg-accent [a&]:hover:text-accent-foreground",
},
},
defaultVariants: {
variant: "default",
},
}
)
function Badge({
className,
variant,
asChild = false,
...props
}: React.ComponentProps<"span"> &
VariantProps<typeof badgeVariants> & { asChild?: boolean }) {
const Comp = asChild ? Slot : "span"
return (
<Comp
data-slot="badge"
className={cn(badgeVariants({ variant }), className)}
{...props}
/>
)
}
export { Badge, badgeVariants }

View File

@ -1,59 +0,0 @@
import * as React from "react"
import { Slot } from "@radix-ui/react-slot"
import { cva, type VariantProps } from "class-variance-authority"
import { cn } from "@/lib/utils"
const buttonVariants = cva(
"inline-flex items-center justify-center gap-2 whitespace-nowrap rounded-md text-sm font-medium transition-all disabled:pointer-events-none disabled:opacity-50 [&_svg]:pointer-events-none [&_svg:not([class*='size-'])]:size-4 shrink-0 [&_svg]:shrink-0 outline-none focus-visible:border-ring focus-visible:ring-ring/50 focus-visible:ring-[3px] aria-invalid:ring-destructive/20 dark:aria-invalid:ring-destructive/40 aria-invalid:border-destructive",
{
variants: {
variant: {
default:
"bg-primary text-primary-foreground shadow-xs hover:bg-primary/90",
destructive:
"bg-destructive text-white shadow-xs hover:bg-destructive/90 focus-visible:ring-destructive/20 dark:focus-visible:ring-destructive/40 dark:bg-destructive/60",
outline:
"border bg-background shadow-xs hover:bg-accent hover:text-accent-foreground dark:bg-input/30 dark:border-input dark:hover:bg-input/50",
secondary:
"bg-secondary text-secondary-foreground shadow-xs hover:bg-secondary/80",
ghost:
"hover:bg-accent hover:text-accent-foreground dark:hover:bg-accent/50",
link: "text-primary underline-offset-4 hover:underline",
},
size: {
default: "h-9 px-4 py-2 has-[>svg]:px-3",
sm: "h-8 rounded-md gap-1.5 px-3 has-[>svg]:px-2.5",
lg: "h-10 rounded-md px-6 has-[>svg]:px-4",
icon: "size-9",
},
},
defaultVariants: {
variant: "default",
size: "default",
},
}
)
function Button({
className,
variant,
size,
asChild = false,
...props
}: React.ComponentProps<"button"> &
VariantProps<typeof buttonVariants> & {
asChild?: boolean
}) {
const Comp = asChild ? Slot : "button"
return (
<Comp
data-slot="button"
className={cn(buttonVariants({ variant, size, className }))}
{...props}
/>
)
}
export { Button, buttonVariants }

View File

@ -1,92 +0,0 @@
import * as React from "react"
import { cn } from "@/lib/utils"
function Card({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card"
className={cn(
"bg-white text-gray-900 flex flex-col gap-6 rounded-2xl border border-gray-100 py-6 shadow-sm",
className
)}
{...props}
/>
)
}
function CardHeader({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-header"
className={cn(
"@container/card-header grid auto-rows-min grid-rows-[auto_auto] items-start gap-1.5 px-6 has-data-[slot=card-action]:grid-cols-[1fr_auto] [.border-b]:pb-6",
className
)}
{...props}
/>
)
}
function CardTitle({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-title"
className={cn("leading-none font-semibold", className)}
{...props}
/>
)
}
function CardDescription({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-description"
className={cn("text-muted-foreground text-sm", className)}
{...props}
/>
)
}
function CardAction({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-action"
className={cn(
"col-start-2 row-span-2 row-start-1 self-start justify-self-end",
className
)}
{...props}
/>
)
}
function CardContent({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-content"
className={cn("px-6", className)}
{...props}
/>
)
}
function CardFooter({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="card-footer"
className={cn("flex items-center px-6 [.border-t]:pt-6", className)}
{...props}
/>
)
}
export {
Card,
CardHeader,
CardFooter,
CardTitle,
CardAction,
CardDescription,
CardContent,
}

View File

@ -1,257 +0,0 @@
"use client"
import * as React from "react"
import * as DropdownMenuPrimitive from "@radix-ui/react-dropdown-menu"
import { CheckIcon, ChevronRightIcon, CircleIcon } from "lucide-react"
import { cn } from "@/lib/utils"
function DropdownMenu({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Root>) {
return <DropdownMenuPrimitive.Root data-slot="dropdown-menu" {...props} />
}
function DropdownMenuPortal({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Portal>) {
return (
<DropdownMenuPrimitive.Portal data-slot="dropdown-menu-portal" {...props} />
)
}
function DropdownMenuTrigger({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Trigger>) {
return (
<DropdownMenuPrimitive.Trigger
data-slot="dropdown-menu-trigger"
{...props}
/>
)
}
function DropdownMenuContent({
className,
sideOffset = 4,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Content>) {
return (
<DropdownMenuPrimitive.Portal>
<DropdownMenuPrimitive.Content
data-slot="dropdown-menu-content"
sideOffset={sideOffset}
className={cn(
"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 max-h-(--radix-dropdown-menu-content-available-height) min-w-32 origin-(--radix-dropdown-menu-content-transform-origin) overflow-x-hidden overflow-y-auto rounded-md border p-1 shadow-md",
className
)}
{...props}
/>
</DropdownMenuPrimitive.Portal>
)
}
function DropdownMenuGroup({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Group>) {
return (
<DropdownMenuPrimitive.Group data-slot="dropdown-menu-group" {...props} />
)
}
function DropdownMenuItem({
className,
inset,
variant = "default",
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Item> & {
inset?: boolean
variant?: "default" | "destructive"
}) {
return (
<DropdownMenuPrimitive.Item
data-slot="dropdown-menu-item"
data-inset={inset}
data-variant={variant}
className={cn(
"focus:bg-accent focus:text-accent-foreground data-[variant=destructive]:text-destructive data-[variant=destructive]:focus:bg-destructive/10 dark:data-[variant=destructive]:focus:bg-destructive/20 data-[variant=destructive]:focus:text-destructive data-[variant=destructive]:*:[svg]:text-destructive! [&_svg:not([class*='text-'])]:text-muted-foreground relative flex cursor-default items-center gap-2 rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-disabled:pointer-events-none data-disabled:opacity-50 data-inset:pl-8 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className
)}
{...props}
/>
)
}
function DropdownMenuCheckboxItem({
className,
children,
checked,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.CheckboxItem>) {
return (
<DropdownMenuPrimitive.CheckboxItem
data-slot="dropdown-menu-checkbox-item"
className={cn(
"focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-disabled:pointer-events-none data-disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className
)}
checked={checked}
{...props}
>
<span className="pointer-events-none absolute left-2 flex size-3.5 items-center justify-center">
<DropdownMenuPrimitive.ItemIndicator>
<CheckIcon className="size-4" />
</DropdownMenuPrimitive.ItemIndicator>
</span>
{children}
</DropdownMenuPrimitive.CheckboxItem>
)
}
function DropdownMenuRadioGroup({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.RadioGroup>) {
return (
<DropdownMenuPrimitive.RadioGroup
data-slot="dropdown-menu-radio-group"
{...props}
/>
)
}
function DropdownMenuRadioItem({
className,
children,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.RadioItem>) {
return (
<DropdownMenuPrimitive.RadioItem
data-slot="dropdown-menu-radio-item"
className={cn(
"focus:bg-accent focus:text-accent-foreground relative flex cursor-default items-center gap-2 rounded-sm py-1.5 pr-2 pl-8 text-sm outline-hidden select-none data-disabled:pointer-events-none data-disabled:opacity-50 [&_svg]:pointer-events-none [&_svg]:shrink-0 [&_svg:not([class*='size-'])]:size-4",
className
)}
{...props}
>
<span className="pointer-events-none absolute left-2 flex size-3.5 items-center justify-center">
<DropdownMenuPrimitive.ItemIndicator>
<CircleIcon className="size-2 fill-current" />
</DropdownMenuPrimitive.ItemIndicator>
</span>
{children}
</DropdownMenuPrimitive.RadioItem>
)
}
function DropdownMenuLabel({
className,
inset,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Label> & {
inset?: boolean
}) {
return (
<DropdownMenuPrimitive.Label
data-slot="dropdown-menu-label"
data-inset={inset}
className={cn(
"px-2 py-1.5 text-sm font-medium data-inset:pl-8",
className
)}
{...props}
/>
)
}
function DropdownMenuSeparator({
className,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Separator>) {
return (
<DropdownMenuPrimitive.Separator
data-slot="dropdown-menu-separator"
className={cn("bg-border -mx-1 my-1 h-px", className)}
{...props}
/>
)
}
function DropdownMenuShortcut({
className,
...props
}: React.ComponentProps<"span">) {
return (
<span
data-slot="dropdown-menu-shortcut"
className={cn(
"text-muted-foreground ml-auto text-xs tracking-widest",
className
)}
{...props}
/>
)
}
function DropdownMenuSub({
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.Sub>) {
return <DropdownMenuPrimitive.Sub data-slot="dropdown-menu-sub" {...props} />
}
function DropdownMenuSubTrigger({
className,
inset,
children,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.SubTrigger> & {
inset?: boolean
}) {
return (
<DropdownMenuPrimitive.SubTrigger
data-slot="dropdown-menu-sub-trigger"
data-inset={inset}
className={cn(
"focus:bg-accent focus:text-accent-foreground data-[state=open]:bg-accent data-[state=open]:text-accent-foreground flex cursor-default items-center rounded-sm px-2 py-1.5 text-sm outline-hidden select-none data-inset:pl-8",
className
)}
{...props}
>
{children}
<ChevronRightIcon className="ml-auto size-4" />
</DropdownMenuPrimitive.SubTrigger>
)
}
function DropdownMenuSubContent({
className,
...props
}: React.ComponentProps<typeof DropdownMenuPrimitive.SubContent>) {
return (
<DropdownMenuPrimitive.SubContent
data-slot="dropdown-menu-sub-content"
className={cn(
"bg-popover text-popover-foreground data-[state=open]:animate-in data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=open]:fade-in-0 data-[state=closed]:zoom-out-95 data-[state=open]:zoom-in-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 min-w-32 origin-(--radix-dropdown-menu-content-transform-origin) overflow-hidden rounded-md border p-1 shadow-lg",
className
)}
{...props}
/>
)
}
export {
DropdownMenu,
DropdownMenuPortal,
DropdownMenuTrigger,
DropdownMenuContent,
DropdownMenuGroup,
DropdownMenuLabel,
DropdownMenuItem,
DropdownMenuCheckboxItem,
DropdownMenuRadioGroup,
DropdownMenuRadioItem,
DropdownMenuSeparator,
DropdownMenuShortcut,
DropdownMenuSub,
DropdownMenuSubTrigger,
DropdownMenuSubContent,
}

View File

@ -1,150 +0,0 @@
"use client";
import { Card, CardContent, CardHeader } from "@/components/ui/card";
import { Badge } from "@/components/ui/badge";
import { Skeleton } from "@/components/ui/skeleton";
import { cn } from "@/lib/utils";
import { TrendingUp, TrendingDown, Minus } from "lucide-react";
interface MetricCardProps {
title: string;
value: string | number | null | undefined;
description?: string;
icon?: React.ReactNode;
trend?: {
value: number;
label?: string;
isPositive?: boolean;
};
variant?: "default" | "primary" | "success" | "warning" | "danger";
isLoading?: boolean;
className?: string;
}
export default function MetricCard({
title,
value,
description,
icon,
trend,
variant = "default",
isLoading = false,
className,
}: MetricCardProps) {
if (isLoading) {
return (
<Card className={cn("relative overflow-hidden", className)}>
<CardHeader className="pb-3">
<div className="flex items-center justify-between">
<Skeleton className="h-4 w-24" />
<Skeleton className="h-10 w-10 rounded-full" />
</div>
</CardHeader>
<CardContent>
<Skeleton className="h-8 w-16 mb-2" />
<Skeleton className="h-3 w-20" />
</CardContent>
</Card>
);
}
const getVariantClasses = () => {
switch (variant) {
case "primary":
return "border border-blue-100 bg-white shadow-sm hover:shadow-md";
case "success":
return "border border-green-100 bg-white shadow-sm hover:shadow-md";
case "warning":
return "border border-pink-100 bg-white shadow-sm hover:shadow-md";
case "danger":
return "border border-red-100 bg-white shadow-sm hover:shadow-md";
default:
return "border border-gray-100 bg-white shadow-sm hover:shadow-md";
}
};
const getIconClasses = () => {
return "bg-gray-50 text-gray-900 border-gray-100";
};
const getTrendIcon = () => {
if (!trend) return null;
if (trend.value === 0) {
return <Minus className="h-3 w-3" />;
}
return trend.isPositive !== false ? (
<TrendingUp className="h-3 w-3" />
) : (
<TrendingDown className="h-3 w-3" />
);
};
const getTrendColor = () => {
if (!trend || trend.value === 0) return "text-muted-foreground";
return trend.isPositive !== false ? "text-green-600 dark:text-green-400" : "text-red-600 dark:text-red-400";
};
return (
<Card
className={cn(
"relative overflow-hidden transition-all duration-200 hover:shadow-lg hover:-translate-y-0.5",
getVariantClasses(),
className
)}
>
<CardHeader className="pb-3 relative">
<div className="flex items-start justify-between">
<div className="space-y-1">
<p className="text-sm font-medium text-gray-900 leading-none">
{title}
</p>
{description && (
<p className="text-xs text-muted-foreground/80">
{description}
</p>
)}
</div>
{icon && (
<div
className={cn(
"flex h-10 w-10 shrink-0 items-center justify-center rounded-full border transition-colors",
getIconClasses()
)}
>
<span className="text-lg">{icon}</span>
</div>
)}
</div>
</CardHeader>
<CardContent className="relative">
<div className="flex items-end justify-between">
<div className="space-y-1">
<p className="text-2xl font-bold tracking-tight text-gray-900">
{value ?? "—"}
</p>
{trend && (
<Badge
variant="secondary"
className={cn(
"text-xs font-medium px-2 py-0.5 gap-1",
getTrendColor(),
"bg-background/50 border-current/20"
)}
>
{getTrendIcon()}
{Math.abs(trend.value).toFixed(1)}%
{trend.label && ` ${trend.label}`}
</Badge>
)}
</div>
</div>
</CardContent>
</Card>
);
}

View File

@ -1,28 +0,0 @@
"use client"
import * as React from "react"
import * as SeparatorPrimitive from "@radix-ui/react-separator"
import { cn } from "@/lib/utils"
function Separator({
className,
orientation = "horizontal",
decorative = true,
...props
}: React.ComponentProps<typeof SeparatorPrimitive.Root>) {
return (
<SeparatorPrimitive.Root
data-slot="separator"
decorative={decorative}
orientation={orientation}
className={cn(
"bg-border shrink-0 data-[orientation=horizontal]:h-px data-[orientation=horizontal]:w-full data-[orientation=vertical]:h-full data-[orientation=vertical]:w-px",
className
)}
{...props}
/>
)
}
export { Separator }

View File

@ -1,13 +0,0 @@
import { cn } from "@/lib/utils"
function Skeleton({ className, ...props }: React.ComponentProps<"div">) {
return (
<div
data-slot="skeleton"
className={cn("bg-accent animate-pulse rounded-md", className)}
{...props}
/>
)
}
export { Skeleton }

View File

@ -1,61 +0,0 @@
"use client"
import * as React from "react"
import * as TooltipPrimitive from "@radix-ui/react-tooltip"
import { cn } from "@/lib/utils"
function TooltipProvider({
delayDuration = 0,
...props
}: React.ComponentProps<typeof TooltipPrimitive.Provider>) {
return (
<TooltipPrimitive.Provider
data-slot="tooltip-provider"
delayDuration={delayDuration}
{...props}
/>
)
}
function Tooltip({
...props
}: React.ComponentProps<typeof TooltipPrimitive.Root>) {
return (
<TooltipProvider>
<TooltipPrimitive.Root data-slot="tooltip" {...props} />
</TooltipProvider>
)
}
function TooltipTrigger({
...props
}: React.ComponentProps<typeof TooltipPrimitive.Trigger>) {
return <TooltipPrimitive.Trigger data-slot="tooltip-trigger" {...props} />
}
function TooltipContent({
className,
sideOffset = 0,
children,
...props
}: React.ComponentProps<typeof TooltipPrimitive.Content>) {
return (
<TooltipPrimitive.Portal>
<TooltipPrimitive.Content
data-slot="tooltip-content"
sideOffset={sideOffset}
className={cn(
"bg-primary text-primary-foreground animate-in fade-in-0 zoom-in-95 data-[state=closed]:animate-out data-[state=closed]:fade-out-0 data-[state=closed]:zoom-out-95 data-[side=bottom]:slide-in-from-top-2 data-[side=left]:slide-in-from-right-2 data-[side=right]:slide-in-from-left-2 data-[side=top]:slide-in-from-bottom-2 z-50 w-fit origin-(--radix-tooltip-content-transform-origin) rounded-md px-3 py-1.5 text-xs text-balance",
className
)}
{...props}
>
{children}
<TooltipPrimitive.Arrow className="bg-primary fill-primary z-50 size-2.5 translate-y-[calc(-50%-2px)] rotate-45 rounded-[2px]" />
</TooltipPrimitive.Content>
</TooltipPrimitive.Portal>
)
}
export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider }

View File

@ -1,81 +0,0 @@
import { PrismaClient } from '@prisma/client';
import { ProcessingStatusManager } from './lib/processingStatusManager';
const prisma = new PrismaClient();
async function debugImportStatus() {
try {
console.log('=== DEBUGGING PROCESSING STATUS (REFACTORED SYSTEM) ===\n');
// Get pipeline status using the new system
const pipelineStatus = await ProcessingStatusManager.getPipelineStatus();
console.log(`Total Sessions: ${pipelineStatus.totalSessions}\n`);
// Display status for each stage
const stages = ['CSV_IMPORT', 'TRANSCRIPT_FETCH', 'SESSION_CREATION', 'AI_ANALYSIS', 'QUESTION_EXTRACTION'];
for (const stage of stages) {
console.log(`${stage}:`);
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const inProgress = stageData.IN_PROGRESS || 0;
const completed = stageData.COMPLETED || 0;
const failed = stageData.FAILED || 0;
const skipped = stageData.SKIPPED || 0;
console.log(` PENDING: ${pending}`);
console.log(` IN_PROGRESS: ${inProgress}`);
console.log(` COMPLETED: ${completed}`);
console.log(` FAILED: ${failed}`);
console.log(` SKIPPED: ${skipped}`);
console.log('');
}
// Check Sessions vs SessionImports
console.log('=== SESSION IMPORT RELATIONSHIP ===');
const sessionsWithImports = await prisma.session.count({
where: { importId: { not: null } }
});
const totalSessions = await prisma.session.count();
console.log(` Sessions with importId: ${sessionsWithImports}`);
console.log(` Total sessions: ${totalSessions}`);
// Show failed sessions if any
const failedSessions = await ProcessingStatusManager.getFailedSessions();
if (failedSessions.length > 0) {
console.log('\n=== FAILED SESSIONS ===');
failedSessions.slice(0, 10).forEach(failure => {
console.log(` ${failure.session.import?.externalSessionId || failure.sessionId}: ${failure.stage} - ${failure.errorMessage}`);
});
if (failedSessions.length > 10) {
console.log(` ... and ${failedSessions.length - 10} more failed sessions`);
}
} else {
console.log('\n✓ No failed sessions found');
}
// Show what needs processing
console.log('\n=== WHAT NEEDS PROCESSING ===');
for (const stage of stages) {
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const failed = stageData.FAILED || 0;
if (pending > 0 || failed > 0) {
console.log(`${stage}: ${pending} pending, ${failed} failed`);
}
}
} catch (error) {
console.error('Error debugging processing status:', error);
} finally {
await prisma.$disconnect();
}
}
debugImportStatus();

View File

@ -0,0 +1,213 @@
# 🤖 Automated Processing System Documentation
## 🎯 Overview
The LiveDash system now features a complete automated processing pipeline that:
- **Processes ALL unprocessed sessions** in batches until completion
- **Runs hourly** to check for new unprocessed sessions
- **Triggers automatically** when dashboard refresh is pressed
- **Validates data quality** and filters out low-quality sessions
- **Requires zero manual intervention** for ongoing operations
---
## 🔄 Complete Workflow
### 1. **CSV Import** (Automatic/Manual)
```
📥 CSV Data → Session Records (processed: false)
```
- **Automatic**: Hourly scheduler imports new CSV data
- **Manual**: Dashboard refresh button triggers immediate import
- **Result**: New sessions created with `processed: false`
### 2. **Transcript Fetching** (As Needed)
```
🔗 fullTranscriptUrl → Message Records
```
- **Script**: `node scripts/fetch-and-parse-transcripts.js`
- **Purpose**: Convert transcript URLs into message records
- **Status**: Only sessions with messages can be AI processed (a fetch-and-store sketch follows below)
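
A rough sketch of this fetch-and-store step. The `Message` field names (`sessionId`, `role`, `content`, `order`) and the line-based transcript format are assumptions for illustration; the real script lives in `scripts/fetch-and-parse-transcripts.js`:

```typescript
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Illustrative only: fetch a transcript URL and store one Message row per line.
async function fetchTranscriptForSession(sessionId: string, url: string) {
  const res = await fetch(url);
  if (!res.ok) throw new Error(`Transcript fetch failed: ${res.status}`);
  const text = await res.text();

  // Assumes one "role: content" pair per line; adjust to the real transcript format.
  const messages = text
    .split("\n")
    .map((line) => line.trim())
    .filter(Boolean)
    .map((line, order) => {
      const [role, ...rest] = line.split(":");
      return { sessionId, order, role: role.trim(), content: rest.join(":").trim() };
    });

  await prisma.message.createMany({ data: messages });
}
```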
### 3. **AI Processing** (Automatic/Manual)
```
💬 Messages → 🤖 OpenAI Analysis → 📊 Structured Data
```
- **Automatic**: Hourly scheduler processes all unprocessed sessions
- **Manual**: Dashboard refresh or direct script execution
- **Batch Processing**: Processes ALL unprocessed sessions until none remain
- **Quality Validation**: Filters out empty questions and short summaries
---
## 🚀 Automated Triggers
### **Hourly Scheduler**
```javascript
// Runs every hour automatically
cron.schedule("0 * * * *", async () => {
await processUnprocessedSessions(); // Process ALL until completion
});
```
### **Dashboard Refresh**
```javascript
// When user clicks refresh in dashboard
POST /api/admin/refresh-sessions
Import new CSV data
Automatically trigger processUnprocessedSessions()
```
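
A minimal sketch of what such a refresh endpoint could look like as a Next.js route handler. The module paths and exact function signatures are assumptions, not the project's actual code:

```typescript
import { NextResponse } from "next/server";
// Assumed module paths; the real functions are referenced elsewhere in these docs.
import { fetchAndStoreSessionsForAllCompanies } from "@/lib/csvFetcher";
import { processUnprocessedSessions } from "@/lib/processingScheduler";

export async function POST() {
  // 1. Import any new CSV rows into session records.
  await fetchAndStoreSessionsForAllCompanies();

  // 2. Kick off AI processing in the background so the response returns quickly.
  void processUnprocessedSessions().catch((err) =>
    console.error("Background processing failed:", err)
  );

  return NextResponse.json({ status: "refresh started" });
}
```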
### **Manual Processing**
```bash
# Process all unprocessed sessions until completion
npx tsx scripts/trigger-processing-direct.js
# Check system status
node scripts/check-database-status.js
# Complete workflow demonstration
npx tsx scripts/complete-workflow-demo.js
```
---
## 📊 Processing Logic
### **Batch Processing Algorithm**
```javascript
while (true) {
  // Get next batch of unprocessed sessions with messages
  const sessions = await findUnprocessedSessions({ batchSize: 10 });

  if (sessions.length === 0) {
    console.log("✅ All sessions processed!");
    break;
  }

  // Process batch with concurrency limit
  await processInParallel(sessions, { maxConcurrency: 3 });

  // Small delay between batches
  await delay(1000); // milliseconds
}
```
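
One possible shape for the concurrency-limited helper used above. The per-session worker name (`processSessionWithAI`) is a hypothetical placeholder, not the project's actual function:

```typescript
// Hypothetical per-session worker; declared here only to keep the sketch self-contained.
declare function processSessionWithAI(sessionId: string): Promise<void>;

// Runs up to maxConcurrency workers that pull sessions from a shared queue.
async function processInParallel(
  sessions: { id: string }[],
  { maxConcurrency = 3 }: { maxConcurrency?: number } = {}
): Promise<void> {
  const queue = [...sessions];
  const runners = Array.from(
    { length: Math.min(maxConcurrency, queue.length) },
    async () => {
      while (true) {
        const session = queue.shift();
        if (!session) break;
        await processSessionWithAI(session.id);
      }
    }
  );
  await Promise.all(runners);
}
```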
### **Quality Validation**
```javascript
// Check data quality after AI processing
const hasValidQuestions = questions.length > 0;
const hasValidSummary = summary.length >= 10;
const isValidData = hasValidQuestions && hasValidSummary;
if (!isValidData) {
console.log("⚠️ Session marked as invalid data");
}
```
---
## 🎯 System Behavior
### **What Gets Processed**
- ✅ Sessions with `processed: false`
- ✅ Sessions that have message records (see the query sketch after this list)
- ❌ Sessions without messages (skipped until transcripts fetched)
- ❌ Already processed sessions (ignored)
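
A query sketch matching these criteria, assuming the `Session` model exposes a boolean `processed` flag and a `messages` relation as described in this document:

```typescript
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Select the next batch of sessions that are unprocessed AND already have messages.
async function findUnprocessedSessions(batchSize = 10) {
  return prisma.session.findMany({
    where: {
      processed: false,         // not yet AI-processed (or { not: true } if nullable)
      messages: { some: {} },   // transcript messages already exist
    },
    include: { messages: true },
    take: batchSize,
  });
}
```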
### **Processing Results**
- **Valid Sessions**: Full AI analysis with categories, questions, summary
- **Invalid Sessions**: Marked as processed but flagged as low-quality
- **Failed Sessions**: Error logged, remains unprocessed for retry
### **Dashboard Integration**
- **Refresh Button**: Imports CSV + triggers processing automatically
- **Real-time Updates**: Processing happens in background
- **Quality Filtering**: Only meaningful conversations shown in analytics
---
## 📈 Current System Status
```
📊 Database Status:
📈 Total sessions: 108
✅ Processed sessions: 20 (All sessions with messages)
⏳ Unprocessed sessions: 88 (Sessions without transcript messages)
💬 Sessions with messages: 20 (Ready for/already processed)
🏢 Total companies: 1
🎯 System State: FULLY OPERATIONAL
✅ All sessions with messages have been processed
✅ Automated processing ready for new data
✅ Quality validation working perfectly
```
---
## 🛠️ Available Scripts
### **Core Processing**
```bash
# Process all unprocessed sessions (complete batch processing)
npx tsx scripts/trigger-processing-direct.js
# Check database status
node scripts/check-database-status.js
# Fetch missing transcripts
node scripts/fetch-and-parse-transcripts.js
```
### **Data Management**
```bash
# Import fresh CSV data
node scripts/trigger-csv-refresh.js
# Reset all sessions to unprocessed (for reprocessing)
node scripts/reset-processed-status.js
```
### **System Demonstration**
```bash
# Complete workflow demonstration
npx tsx scripts/complete-workflow-demo.js
```
---
## 🎉 Key Achievements
### **✅ Complete Automation**
- **Zero manual intervention** needed for ongoing operations
- **Hourly processing** of any new unprocessed sessions
- **Dashboard integration** with automatic processing triggers
### **✅ Batch Processing**
- **Processes ALL unprocessed sessions** until none remain
- **Configurable batch sizes** and concurrency limits
- **Progress tracking** with detailed logging
### **✅ Quality Validation**
- **Automatic filtering** of low-quality sessions
- **Enhanced OpenAI prompts** with crystal-clear instructions
- **Data quality checks** before and after processing
### **✅ Production Ready**
- **Error handling** and retry logic
- **Background processing** without blocking responses
- **Comprehensive logging** for monitoring and debugging
---
## 🚀 Production Deployment
The system is now **100% ready for production** with:
1. **Automated CSV import** every hour
2. **Automated AI processing** every hour
3. **Dashboard refresh integration** for immediate processing
4. **Quality validation** to ensure clean analytics
5. **Complete batch processing** until all sessions are analyzed
**No manual intervention required** - the system will automatically process all new data as it arrives!

View File

@ -12,10 +12,10 @@ The WordCloud component visualizes categories or topics based on their frequency
**Features:**
- Dynamic sizing based on frequency
- Colorful display with a pleasing color palette
- Responsive design
- Interactive hover effects
### 2. GeographicMap
@ -25,10 +25,10 @@ This component displays a world map with circles representing the number of sess
**Features:**
- Interactive map using React Leaflet
- Circle sizes scaled by session count
- Tooltips showing country names and session counts
- Responsive design
### 3. MetricCard
@ -38,10 +38,10 @@ A modern, visually appealing card for displaying key metrics.
**Features:**
- Multiple design variants (default, primary, success, warning, danger)
- Support for trend indicators
- Icons and descriptions
- Clean, modern styling
### 4. DonutChart
@ -51,10 +51,10 @@ An enhanced donut chart with better styling and a central text display capabilit
**Features:**
- Customizable colors
- Center text area for displaying summaries
- Interactive tooltips with percentages
- Well-balanced legend display
### 5. ResponseTimeDistribution
@ -64,28 +64,28 @@ Visualizes the distribution of response times as a histogram.
**Features:**
- Color-coded bars (green for fast, yellow for medium, red for slow)
- Target time indicator
- Automatic binning of response times
- Clear labeling and scales
## Dashboard Enhancements
The dashboard has been enhanced with:
1. **Improved Layout**: Better use of space and responsive grid layouts
2. **Visual Hierarchies**: Clear heading styles and consistent spacing
3. **Color Coding**: Semantic use of colors to indicate statuses
4. **Interactive Elements**: Better button styles with loading indicators
5. **Data Context**: More complete view of metrics with additional visualizations
6. **Geographic Insights**: Map view of session distribution by country
7. **Language Analysis**: Improved language distribution visualization
8. **Category Analysis**: Word cloud for category popularity
9. **Performance Metrics**: Response time distribution for better insight into system performance
## Usage Notes
- The geographic map and response time distribution use simulated data where actual data is not available
- All components are responsive and will adjust to different screen sizes
- The dashboard automatically refreshes data when using the refresh button
- Admin users have access to additional controls at the bottom of the dashboard

View File

@ -1,130 +0,0 @@
# PostgreSQL Migration Documentation
## Overview
Successfully migrated the livedash-node application from SQLite to PostgreSQL using Neon as the database provider. This migration provides better scalability, performance, and production-readiness.
## Migration Summary
### What Was Changed
1. **Database Provider**: Changed from SQLite to PostgreSQL in `prisma/schema.prisma`
2. **Environment Configuration**: Updated to use environment-based database URL selection
3. **Test Setup**: Configured separate test database using `DATABASE_URL_TEST`
4. **Migration History**: Reset and created fresh PostgreSQL migrations
### Database Configuration
#### Production/Development
- **Provider**: PostgreSQL (Neon)
- **Environment Variable**: `DATABASE_URL`
- **Connection**: Neon PostgreSQL cluster
#### Testing
- **Provider**: PostgreSQL (Neon - separate database)
- **Environment Variable**: `DATABASE_URL_TEST`
- **Test Setup**: Automatically switches to test database during test runs
### Files Modified
1. **`prisma/schema.prisma`**
- Changed provider from `sqlite` to `postgresql`
- Updated URL to use `env("DATABASE_URL")`
2. **`tests/setup.ts`**
- Added logic to use `DATABASE_URL_TEST` when available
- Ensures test isolation with separate database
3. **`.env`** (created)
- Contains `DATABASE_URL` for Prisma CLI operations
4. **`.env.local`** (existing)
- Contains both `DATABASE_URL` and `DATABASE_URL_TEST`
### Database Schema
All existing models and relationships were preserved:
- **Company**: Multi-tenant root entity
- **User**: Authentication and authorization
- **Session**: Processed session data
- **SessionImport**: Raw CSV import data
- **Message**: Individual conversation messages
- **Question**: Normalized question storage
- **SessionQuestion**: Session-question relationships
- **AIProcessingRequest**: AI cost tracking
### Migration Process
1. **Schema Update**: Changed provider to PostgreSQL
2. **Migration Reset**: Removed SQLite migration history
3. **Fresh Migration**: Created new PostgreSQL migration
4. **Client Generation**: Generated new Prisma client for PostgreSQL
5. **Database Seeding**: Applied initial seed data
6. **Testing**: Verified all functionality works with PostgreSQL
### Benefits Achieved
**Production-Ready**: PostgreSQL is enterprise-grade and scalable
**Better Performance**: Superior query performance and optimization
**Advanced Features**: Full JSON support, arrays, advanced indexing
**Test Isolation**: Separate test database prevents data conflicts
**Consistency**: Same database engine across all environments
**Cloud-Native**: Neon provides managed PostgreSQL with excellent DX
### Environment Variables
```env
# Production/Development Database
DATABASE_URL="postgresql://user:pass@host/database?sslmode=require"
# Test Database (separate Neon database)
DATABASE_URL_TEST="postgresql://user:pass@test-host/test-database?sslmode=require"
```
### Test Configuration
Tests automatically use the test database when `DATABASE_URL_TEST` is set:
```typescript
// In tests/setup.ts
if (process.env.DATABASE_URL_TEST) {
process.env.DATABASE_URL = process.env.DATABASE_URL_TEST;
}
```
### Verification
All tests pass successfully:
- ✅ Environment configuration tests
- ✅ Transcript fetcher tests
- ✅ Database connection tests
- ✅ Schema validation tests
- ✅ CRUD operation tests
### Next Steps
1. **Data Import**: Import production data if needed
2. **Performance Monitoring**: Monitor query performance in production
3. **Backup Strategy**: Configure automated backups via Neon
4. **Connection Pooling**: Consider connection pooling for high-traffic scenarios
### Rollback Plan
If rollback is needed:
1. Revert `prisma/schema.prisma` to SQLite configuration
2. Restore SQLite migration files from git history
3. Update environment variables
4. Run `prisma migrate reset` and `prisma generate`
## Conclusion
The PostgreSQL migration was successful and provides a solid foundation for production deployment. The application now benefits from PostgreSQL's advanced features while maintaining full test isolation and development workflow compatibility.

View File

@ -1,133 +0,0 @@
# Processing System Refactor - Complete
## Overview
Successfully refactored the session processing pipeline from a simple status-based system to a comprehensive multi-stage processing status system. This addresses the original issues with the SessionImport table's `status` and `errorMsg` columns.
## Problems Solved
### Original Issues
1. **Inconsistent Status Tracking**: The old system used a simple enum on SessionImport that didn't properly track the multi-stage processing pipeline
2. **Poor Error Visibility**: Error messages were buried in the SessionImport table and not easily accessible
3. **No Stage-Specific Tracking**: The system couldn't track which specific stage of processing failed
4. **Difficult Recovery**: Failed sessions were hard to identify and retry
5. **Linting Errors**: Multiple TypeScript files referencing removed database fields
### Schema Changes Made
- **Removed** old `status`, `errorMsg`, and `processedAt` columns from SessionImport
- **Removed** `processed` field from Session
- **Added** new `SessionProcessingStatus` table with granular stage tracking
- **Added** `ProcessingStage` and `ProcessingStatus` enums
## New Processing Pipeline
### Processing Stages
```typescript
enum ProcessingStage {
CSV_IMPORT // SessionImport created
TRANSCRIPT_FETCH // Transcript content fetched
SESSION_CREATION // Session + Messages created
AI_ANALYSIS // AI processing completed
QUESTION_EXTRACTION // Questions extracted
}
enum ProcessingStatus {
PENDING, IN_PROGRESS, COMPLETED, FAILED, SKIPPED
}
```
### Key Components
#### 1. ProcessingStatusManager
Centralized class for managing processing status with the following methods (a usage sketch follows the list):
- `initializeSession()` - Set up processing status for new sessions
- `startStage()`, `completeStage()`, `failStage()`, `skipStage()` - Stage management
- `getSessionsNeedingProcessing()` - Query sessions by stage and status
- `getPipelineStatus()` - Get overview of entire pipeline
- `getFailedSessions()` - Find sessions needing retry
- `resetStageForRetry()` - Reset failed stages
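
A hypothetical usage sketch of the stage methods listed above. The argument shapes (stage string, metadata object, error message) are assumptions; see `lib/processingStatusManager.ts` for the real signatures:

```typescript
import { ProcessingStatusManager } from "./lib/processingStatusManager";

async function runAiAnalysis(sessionId: string) {
  await ProcessingStatusManager.startStage(sessionId, "AI_ANALYSIS");
  try {
    // ... call OpenAI and persist the analysis results ...
    await ProcessingStatusManager.completeStage(sessionId, "AI_ANALYSIS", {
      model: "gpt-4o-mini", // example metadata; the stored shape is an assumption
    });
  } catch (err) {
    await ProcessingStatusManager.failStage(
      sessionId,
      "AI_ANALYSIS",
      err instanceof Error ? err.message : String(err)
    );
  }
}
```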
#### 2. Updated Processing Scheduler
- Integrated with new `ProcessingStatusManager`
- Tracks AI analysis and question extraction stages
- Records detailed processing metadata
- Proper error handling and retry capabilities
#### 3. Migration System
- Successfully migrated all 109 existing sessions
- Determined current state based on existing data
- Preserved all existing functionality
## Current Pipeline Status
After migration and refactoring:
- **CSV_IMPORT**: 109 completed
- **TRANSCRIPT_FETCH**: 109 completed
- **SESSION_CREATION**: 109 completed
- **AI_ANALYSIS**: 16 completed, 93 pending
- **QUESTION_EXTRACTION**: 11 completed, 98 pending
## Files Updated/Created
### New Files
- `lib/processingStatusManager.ts` - Core processing status management
- `check-refactored-pipeline-status.ts` - New pipeline status checker
- `migrate-to-refactored-system.ts` - Migration script
- `docs/processing-system-refactor.md` - This documentation
### Updated Files
- `prisma/schema.prisma` - Added new processing status tables
- `lib/processingScheduler.ts` - Integrated with new status system
- `debug-import-status.ts` - Updated to use new system
- `fix-import-status.ts` - Updated to use new system
### Removed Files
- `check-pipeline-status.ts` - Replaced by refactored version
## Benefits Achieved
1. **Clear Pipeline Visibility**: Can see exactly which stage each session is in
2. **Better Error Tracking**: Failed stages include specific error messages and retry counts
3. **Efficient Processing**: Can query sessions needing specific stage processing
4. **Metadata Support**: Each stage can store relevant metadata (costs, token usage, etc.)
5. **Easy Recovery**: Failed sessions can be easily identified and retried
6. **Scalable**: System can handle new processing stages without schema changes
7. **No Linting Errors**: All TypeScript compilation issues resolved
## Usage Examples
### Check Pipeline Status
```bash
npx tsx check-refactored-pipeline-status.ts
```
### Debug Processing Issues
```bash
npx tsx debug-import-status.ts
```
### Fix/Retry Failed Sessions
```bash
npx tsx fix-import-status.ts
```
### Process Sessions
```bash
npx tsx test-ai-processing.ts
```
## Next Steps
1. **Test AI Processing**: Run AI processing on pending sessions
2. **Monitor Performance**: Watch for any issues with the new system
3. **Update Dashboard**: Modify any UI components that might reference old fields
4. **Documentation**: Update any API documentation that references the old system
## Migration Notes
- All existing data preserved
- No data loss during migration
- Backward compatibility maintained where possible
- System ready for production use
The refactored system provides much better visibility into the processing pipeline and makes it easy to identify and resolve any issues that arise during session processing.

View File

@ -8,8 +8,8 @@
**Solution**:
- Added validation in `fetchAndStoreSessionsForAllCompanies()` to skip companies with example/invalid URLs
- Removed the invalid company record from the database using `fix_companies.js`
### 2. Transcript Fetching Errors
@ -17,10 +17,10 @@
**Solution**:
- Improved error handling in `fetchTranscriptContent()` function
- Added probabilistic logging (only ~10% of errors logged) to prevent log spam
- Added timeout (10 seconds) for transcript fetching
- Made transcript fetching failures non-blocking (sessions are still created without transcript content); see the sketch below
### 3. CSV Fetching Errors ### 3. CSV Fetching Errors
@ -28,8 +28,8 @@
**Solution**: **Solution**:
- Added URL validation to skip companies with `example.com` URLs - Added URL validation to skip companies with `example.com` URLs
- Improved error logging to be more descriptive - Improved error logging to be more descriptive
## Current Status ## Current Status
@ -42,22 +42,22 @@
After cleanup, only valid companies remain: After cleanup, only valid companies remain:
- **Demo Company** (`790b9233-d369-451f-b92c-f4dceb42b649`) - **Demo Company** (`790b9233-d369-451f-b92c-f4dceb42b649`)
- CSV URL: `https://proto.notso.ai/jumbo/chats` - CSV URL: `https://proto.notso.ai/jumbo/chats`
- Has valid authentication credentials - Has valid authentication credentials
- 107 sessions in database - 107 sessions in database
## Files Modified ## Files Modified
1. **lib/csvFetcher.js** 1. **lib/csvFetcher.js**
- Added company URL validation - Added company URL validation
- Improved transcript fetching error handling - Improved transcript fetching error handling
- Reduced error log verbosity - Reduced error log verbosity
2. **fix_companies.js** (cleanup script) 2. **fix_companies.js** (cleanup script)
- Removes invalid company records - Removes invalid company records
- Can be run again if needed - Can be run again if needed
## Monitoring ## Monitoring
@ -73,7 +73,7 @@ node -e "import('./lib/csvFetcher.js').then(m => m.fetchAndStoreSessionsForAllCo
## Future Improvements ## Future Improvements
1. Add health check endpoint for scheduler status 1. Add health check endpoint for scheduler status
2. Add metrics for successful/failed fetches 2. Add metrics for successful/failed fetches
3. Consider retry logic for temporary failures 3. Consider retry logic for temporary failures
4. Add alerting for persistent failures 4. Add alerting for persistent failures


@ -1,28 +1,24 @@
# Scheduler Workflow Documentation
## Overview
The LiveDash system has two main schedulers that work together to fetch and process session data:
1. **Session Refresh Scheduler** - Fetches new sessions from CSV files
2. **Processing Scheduler** - Processes session transcripts with AI
## Current Status (as of latest check)
- **Total sessions**: 107
- **Processed sessions**: 0
- **Sessions with transcript**: 0
- **Ready for processing**: 0
## How the `processed` Field Works
The ProcessingScheduler picks up sessions where `processed` is **NOT** `true`, which includes:
- `processed = false`
- `processed = null`
**Query used:**
```javascript
{ processed: { not: true } } // Either false or null
```
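Put together with the transcript requirement from Step 3 below, the selection the processing scheduler runs is roughly the following; this is a sketch based on the fields described in this document, not the exact query in `lib/processingScheduler.js`:

```typescript
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Unprocessed sessions (false or null) that already have transcript content.
const batch = await prisma.session.findMany({
  where: {
    processed: { not: true },
    transcriptContent: { not: null }, // field name as used in this document
  },
  take: 10, // the processing scheduler handles 10 sessions per run (see configuration below)
});
console.log(`Ready for processing: ${batch.length}`);
```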
@ -30,60 +26,50 @@ The ProcessingScheduler picks up sessions where `processed` is **NOT** `true`, w
## Complete Workflow
### Step 1: Session Refresh (CSV Fetching)
**What it does:**
- Fetches session data from company CSV URLs
- Creates session records in database with basic metadata
- Sets `transcriptContent = null` initially
- Sets `processed = null` initially
**Runs:** Every 30 minutes (cron: `*/30 * * * *`)
### Step 2: Transcript Fetching
**What it does:**
- Downloads full transcript content for sessions
- Updates `transcriptContent` field with actual conversation data
- Sessions remain `processed = null` until AI processing
**Runs:** As part of session refresh process
### Step 3: AI Processing
**What it does:**
- Finds sessions with transcript content where `processed != true`
- Sends transcripts to OpenAI for analysis
- Extracts: sentiment, category, questions, summary, etc.
- Updates session with processed data
- Sets `processed = true`
**Runs:** Every hour (cron: `0 * * * *`)
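For reference, the two schedules above could be wired up with `node-cron` roughly as follows. This is only a sketch: the actual registration lives in `lib/scheduler.js` and `lib/processingScheduler.js`, and the handler import paths and signatures are assumed from how they are used elsewhere in this change:

```typescript
import cron from "node-cron";
import { fetchAndStoreSessionsForAllCompanies } from "./lib/csvFetcher";
import { processUnprocessedSessions } from "./lib/processingSchedulerNoCron";

// Steps 1-2: refresh sessions (and their transcripts) every 30 minutes.
cron.schedule("*/30 * * * *", () => {
  fetchAndStoreSessionsForAllCompanies().catch((err) =>
    console.error("[Scheduler] Session refresh failed:", err)
  );
});

// Step 3: run AI processing on unprocessed sessions every hour.
cron.schedule("0 * * * *", () => {
  processUnprocessedSessions().catch((err) =>
    console.error("[Scheduler] AI processing failed:", err)
  );
});
```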
## Manual Trigger Commands
### Check Current Status
```bash
node scripts/manual-triggers.js status
```
### Trigger Session Refresh (Fetch new sessions from CSV)
```bash
node scripts/manual-triggers.js refresh
```
### Trigger AI Processing (Process unprocessed sessions)
```bash
node scripts/manual-triggers.js process
```
### Run Both Schedulers
```bash
node scripts/manual-triggers.js both
```
@ -91,42 +77,36 @@ node scripts/manual-triggers.js both
## Troubleshooting
### No Sessions Being Processed?
1. **Check if sessions have transcripts:**
   ```bash
   node scripts/manual-triggers.js status
   ```
2. **If "Sessions with transcript" is 0:**
   - Sessions exist but transcripts haven't been fetched yet
   - Run session refresh: `node scripts/manual-triggers.js refresh`
3. **If "Ready for processing" is 0 but "Sessions with transcript" > 0:**
   - All sessions with transcripts have already been processed
   - Check if `OPENAI_API_KEY` is set in environment
### Common Issues
#### "No sessions found requiring processing"
- All sessions with transcripts have been processed (`processed = true`)
- Or no sessions have transcript content yet
#### "OPENAI_API_KEY environment variable is not set"
- Add OpenAI API key to `.env.development` file
- Restart the application
#### "Error fetching transcript: Unauthorized"
- CSV credentials are incorrect or expired
- Check company CSV username/password in database
## Database Field Mapping
### Before AI Processing
```javascript
{
  id: "session-uuid",
@ -140,7 +120,6 @@ node scripts/manual-triggers.js both
```
### After AI Processing
```javascript
{
  id: "session-uuid",
@ -162,17 +141,15 @@ node scripts/manual-triggers.js both
```
## Scheduler Configuration
### Session Refresh Scheduler
- **File**: `lib/scheduler.js`
- **Frequency**: Every 30 minutes
- **Cron**: `*/30 * * * *`
### Processing Scheduler
- **File**: `lib/processingScheduler.js`
- **Frequency**: Every hour
- **Cron**: `0 * * * *`
- **Batch size**: 10 sessions per run
## Environment Variables Required
@ -190,22 +167,19 @@ NEXTAUTH_URL="http://localhost:3000"
## Next Steps for Testing
1. **Trigger session refresh** to fetch transcripts:
   ```bash
   node scripts/manual-triggers.js refresh
   ```
2. **Check status** to see if transcripts were fetched:
   ```bash
   node scripts/manual-triggers.js status
   ```
3. **Trigger processing** if transcripts are available:
   ```bash
   node scripts/manual-triggers.js process
   ```
4. **View results** in the dashboard session details pages


@ -6,47 +6,47 @@ This document explains how the session processing system works in LiveDash-Node.
The system now includes an automated process for analyzing chat session transcripts using OpenAI's API. This process:
1. Fetches session data from CSV sources
2. Only adds new sessions that don't already exist in the database
3. Processes session transcripts with OpenAI to extract valuable insights
4. Updates the database with the processed information
## How It Works
### Session Fetching
- The system fetches session data from configured CSV URLs for each company
- Unlike the previous implementation, it now only adds sessions that don't already exist in the database
- This prevents duplicate sessions and allows for incremental updates
### Transcript Processing
- For sessions with transcript content that haven't been processed yet, the system calls OpenAI's API
- The API analyzes the transcript and extracts the following information:
  - Primary language used (ISO 639-1 code)
  - Number of messages sent by the user
  - Overall sentiment (positive, neutral, negative)
  - Whether the conversation was escalated
  - Whether HR contact was mentioned or provided
  - Best-fitting category for the conversation
  - Up to 5 paraphrased questions asked by the user
  - A brief summary of the conversation
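The extracted fields listed above suggest a result shape along these lines; the names are illustrative, not the exact contract returned by the OpenAI call:

```typescript
// Assumed shape of the per-session analysis result (names are illustrative).
interface TranscriptAnalysis {
  language: string; // ISO 639-1 code, e.g. "en"
  userMessageCount: number;
  sentiment: "positive" | "neutral" | "negative";
  escalated: boolean;
  forwardedHr: boolean;
  category: string;
  questions: string[]; // up to 5 paraphrased questions
  summary: string;
}
```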
### Scheduling
The system includes two schedulers:
1. **Session Refresh Scheduler**: Runs every 15 minutes to fetch new sessions from CSV sources
2. **Session Processing Scheduler**: Runs every hour to process unprocessed sessions with OpenAI
## Database Schema
The Session model has been updated with new fields to store the processed data:
- `processed`: Boolean flag indicating whether the session has been processed
- `sentimentCategory`: String value ("positive", "neutral", "negative") from OpenAI
- `questions`: JSON array of questions asked by the user
- `summary`: Brief summary of the conversation
## Configuration
@ -62,9 +62,9 @@ OPENAI_API_KEY=your_api_key_here
To run the application with schedulers enabled:
- Development: `npm run dev`
- Development (with schedulers disabled): `npm run dev:no-schedulers`
- Production: `npm run start`
Note: These commands will start a custom Next.js server with the schedulers enabled. You'll need to have an OpenAI API key set in your `.env.local` file for the session processing to work.
@ -82,5 +82,5 @@ This will process all unprocessed sessions that have transcript content.
The processing logic can be customized by modifying:
- `lib/processingScheduler.ts`: Contains the OpenAI processing logic
- `scripts/process_sessions.ts`: Standalone script for manual processing


@ -1,13 +1,11 @@
# Transcript Parsing Implementation
## Overview
Added structured message parsing to the LiveDash system, allowing transcripts to be broken down into individual messages with timestamps, roles, and content. This provides a much better user experience for viewing conversations.
## Database Changes
### New Message Table
```sql
CREATE TABLE Message (
  id TEXT PRIMARY KEY DEFAULT (uuid()),
@ -24,14 +22,12 @@ CREATE INDEX Message_sessionId_order_idx ON Message(sessionId, order);
```
### Updated Session Table
- Added `messages` relation to Session model
- Sessions can now have both raw transcript content AND parsed messages
## New Components
### 1. Message Interface (`lib/types.ts`)
```typescript
export interface Message {
  id: string;
@ -45,43 +41,36 @@ export interface Message {
```
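The diff elides the rest of the interface. Based on the `Message` table above and the parser described below, it presumably contains roughly the following fields; everything beyond `id` is an assumption:

```typescript
// Assumed completion of the elided interface; only `id: string` is visible in the diff.
export interface Message {
  id: string;
  sessionId: string;
  timestamp: string | null; // ISO timestamp parsed from "[DD.MM.YYYY HH:MM:SS]"
  role: string; // "User", "Assistant", "System", ...
  content: string;
  order: number; // position within the conversation
}
```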
### 2. Transcript Parser (`lib/transcriptParser.js`)
- **`parseChatLogToJSON(logString)`** - Parses raw transcript text into structured messages
- **`storeMessagesForSession(sessionId, messages)`** - Stores parsed messages in database
- **`processTranscriptForSession(sessionId, transcriptContent)`** - Complete processing for one session
- **`processAllUnparsedTranscripts()`** - Batch process all unparsed transcripts
- **`getMessagesForSession(sessionId)`** - Retrieve messages for a session
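A minimal usage sketch of the parser module, using the signatures listed above (the placeholder session id and transcript are hypothetical):

```typescript
import {
  parseChatLogToJSON,
  storeMessagesForSession,
} from "./lib/transcriptParser.js";

const sessionId = "session-uuid"; // placeholder
const transcriptContent =
  "[01.06.2025 14:03:12] User: Hello, I need help\n[01.06.2025 14:03:20] Assistant: Sure, what can I do for you?";

const messages = parseChatLogToJSON(transcriptContent); // raw text -> structured messages
await storeMessagesForSession(sessionId, messages);
// Equivalent one-step call for a single session:
// await processTranscriptForSession(sessionId, transcriptContent);
```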
### 3. MessageViewer Component (`components/MessageViewer.tsx`)
- Chat-like interface for displaying parsed messages
- Color-coded by role (User: blue, Assistant: gray, System: yellow)
- Shows timestamps and message order
- Scrollable with conversation metadata
## Updated Components
### 1. Session API (`pages/api/dashboard/session/[id].ts`)
- Now includes parsed messages in session response
- Messages are ordered by `order` field (ascending)
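The corresponding Prisma call (the same pattern appears in `lib/data-service.ts` later in this change) looks roughly like this:

```typescript
import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();
const sessionId = "session-uuid"; // placeholder

// Session with its parsed messages in conversation order.
const sessionWithMessages = await prisma.session.findUnique({
  where: { id: sessionId },
  include: {
    messages: { orderBy: { order: "asc" } },
  },
});
console.log(sessionWithMessages?.messages.length ?? 0, "messages");
```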
### 2. Session Details Page (`app/dashboard/sessions/[id]/page.tsx`)
- Added MessageViewer component
- Shows both parsed messages AND raw transcript
- Prioritizes parsed messages when available
### 3. ChatSession Interface (`lib/types.ts`)
- Added optional `messages?: Message[]` field
## Parsing Logic
### Supported Format
The parser expects transcript format:
```
[DD.MM.YYYY HH:MM:SS] Role: Message content
[DD.MM.YYYY HH:MM:SS] User: Hello, I need help
@ -89,17 +78,15 @@ The parser expects transcript format:
```
### Features
- **Multi-line support** - Messages can span multiple lines
- **Timestamp parsing** - Converts DD.MM.YYYY HH:MM:SS to ISO format
- **Role detection** - Extracts sender role from each message
- **Ordering** - Maintains conversation order with explicit order field
- **Sorting** - Messages sorted by timestamp, then by role (User before Assistant)
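A minimal sketch of how a single line in this format can be parsed; the real parser in `lib/transcriptParser.js` additionally handles multi-line messages and sorting:

```typescript
// Parse one "[DD.MM.YYYY HH:MM:SS] Role: content" line (sketch only).
function parseLine(line: string, order: number) {
  const match = line.match(
    /^\[(\d{2})\.(\d{2})\.(\d{4}) (\d{2}):(\d{2}):(\d{2})\]\s*([^:]+):\s*(.*)$/
  );
  if (!match) return null;
  const [, dd, mm, yyyy, h, m, s, role, content] = match;
  return {
    timestamp: new Date(`${yyyy}-${mm}-${dd}T${h}:${m}:${s}`).toISOString(),
    role: role.trim(),
    content,
    order,
  };
}

console.log(parseLine("[01.06.2025 14:03:12] User: Hello, I need help", 0));
```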
## Manual Commands
### New Commands Added
```bash
# Parse transcripts into structured messages
node scripts/manual-triggers.js parse
@ -112,20 +99,17 @@ node scripts/manual-triggers.js status
```
### Updated Commands
- **`status`** - Now shows transcript and parsing statistics
- **`all`** - New command that runs refresh → parse → process in sequence
## Workflow Integration
### Complete Processing Pipeline
1. **Session Refresh** - Fetch sessions from CSV, download transcripts
2. **Transcript Parsing** - Parse raw transcripts into structured messages
3. **AI Processing** - Process sessions with OpenAI for sentiment, categories, etc.
### Database States
```javascript
// After CSV fetch
{
@ -155,24 +139,21 @@ node scripts/manual-triggers.js status
```
## User Experience Improvements
### Before
- Only raw transcript text in a text area
- Difficult to follow conversation flow
- No clear distinction between speakers
### After
- **Chat-like interface** with message bubbles
- **Color-coded roles** for easy identification
- **Timestamps** for each message
- **Conversation metadata** (first/last message times)
- **Fallback to raw transcript** if parsing fails
- **Both views available** - structured AND raw
## Testing
### Manual Testing Commands
```bash
# Check current status
node scripts/manual-triggers.js status
@ -185,44 +166,38 @@ node scripts/manual-triggers.js all
```
### Expected Results
1. Sessions with transcript content get parsed into individual messages
2. Session detail pages show chat-like interface
3. Both parsed messages and raw transcript are available
4. No data loss - original transcript content preserved
## Technical Benefits
### Performance
- **Indexed queries** - Messages indexed by sessionId and order
- **Efficient loading** - Only load messages when needed
- **Cascading deletes** - Messages automatically deleted with sessions
### Maintainability
- **Separation of concerns** - Parsing logic isolated in dedicated module
- **Type safety** - Full TypeScript support for Message interface
- **Error handling** - Graceful fallbacks when parsing fails
### Extensibility
- **Role flexibility** - Supports any role names (User, Assistant, System, etc.)
- **Content preservation** - Multi-line messages fully supported
- **Metadata ready** - Easy to add message-level metadata in future
## Migration Notes
### Existing Data
- **No data loss** - Original transcript content preserved
- **Backward compatibility** - Pages work with or without parsed messages
- **Gradual migration** - Can parse transcripts incrementally
### Database Migration
- New Message table created with foreign key constraints
- Existing Session table unchanged (only added relation)
- Index created for efficient message queries
This implementation provides a solid foundation for enhanced conversation analysis and user experience while maintaining full backward compatibility.


@ -1,88 +0,0 @@
import { PrismaClient, ProcessingStage, ProcessingStatus } from '@prisma/client';
import { ProcessingStatusManager } from './lib/processingStatusManager';
const prisma = new PrismaClient();
async function fixProcessingStatus() {
try {
console.log('=== FIXING PROCESSING STATUS (REFACTORED SYSTEM) ===\n');
// Check for any failed processing stages that might need retry
const failedSessions = await ProcessingStatusManager.getFailedSessions();
console.log(`Found ${failedSessions.length} failed processing stages`);
if (failedSessions.length > 0) {
console.log('\nFailed sessions by stage:');
const failuresByStage: Record<string, number> = {};
failedSessions.forEach(failure => {
failuresByStage[failure.stage] = (failuresByStage[failure.stage] || 0) + 1;
});
Object.entries(failuresByStage).forEach(([stage, count]) => {
console.log(` ${stage}: ${count} failures`);
});
// Show sample failed sessions
console.log('\nSample failed sessions:');
failedSessions.slice(0, 5).forEach(failure => {
console.log(` ${failure.session.import?.externalSessionId || failure.sessionId}: ${failure.stage} - ${failure.errorMessage}`);
});
// Ask if user wants to reset failed stages for retry
console.log('\nTo reset failed stages for retry, you can use:');
console.log('ProcessingStatusManager.resetStageForRetry(sessionId, stage)');
}
// Check for sessions that might be stuck in IN_PROGRESS
const stuckSessions = await prisma.sessionProcessingStatus.findMany({
where: {
status: ProcessingStatus.IN_PROGRESS,
startedAt: {
lt: new Date(Date.now() - 30 * 60 * 1000) // Started more than 30 minutes ago
}
},
include: {
session: {
include: {
import: true
}
}
}
});
if (stuckSessions.length > 0) {
console.log(`\nFound ${stuckSessions.length} sessions stuck in IN_PROGRESS state:`);
stuckSessions.forEach(stuck => {
console.log(` ${stuck.session.import?.externalSessionId || stuck.sessionId}: ${stuck.stage} (started: ${stuck.startedAt})`);
});
console.log('\nThese sessions may need to be reset to PENDING status for retry.');
}
// Show current pipeline status
console.log('\n=== CURRENT PIPELINE STATUS ===');
const pipelineStatus = await ProcessingStatusManager.getPipelineStatus();
const stages = ['CSV_IMPORT', 'TRANSCRIPT_FETCH', 'SESSION_CREATION', 'AI_ANALYSIS', 'QUESTION_EXTRACTION'];
for (const stage of stages) {
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const inProgress = stageData.IN_PROGRESS || 0;
const completed = stageData.COMPLETED || 0;
const failed = stageData.FAILED || 0;
const skipped = stageData.SKIPPED || 0;
console.log(`${stage}: ${completed} completed, ${pending} pending, ${inProgress} in progress, ${failed} failed, ${skipped} skipped`);
}
} catch (error) {
console.error('Error fixing processing status:', error);
} finally {
await prisma.$disconnect();
}
}
fixProcessingStatus();

lib/admin-service.ts

@ -0,0 +1,50 @@
import { getServerSession } from "next-auth";
import { authOptions } from "../app/api/auth/[...nextauth]/route"; // Adjust path as needed
import { prisma } from "./prisma";
import { processUnprocessedSessions } from "./processingSchedulerNoCron";
export async function getAdminUser() {
const session = await getServerSession(authOptions);
if (!session?.user) {
throw new Error("Not logged in");
}
const user = await prisma.user.findUnique({
where: { email: session.user.email as string },
include: { company: true },
});
if (!user) {
throw new Error("No user found");
}
if (user.role !== "admin") {
throw new Error("Admin access required");
}
return user;
}
export async function triggerSessionProcessing(batchSize?: number, maxConcurrency?: number) {
const unprocessedCount = await prisma.session.count({
where: {
processed: false,
messages: { some: {} }, // Must have messages
},
});
if (unprocessedCount === 0) {
return { message: "No unprocessed sessions found", unprocessedCount: 0, processedCount: 0 };
}
processUnprocessedSessions(batchSize, maxConcurrency)
.then(() => {
console.log(`[Manual Trigger] Processing completed`);
})
.catch((error) => {
console.error(`[Manual Trigger] Processing failed:`, error);
});
return { message: `Started processing ${unprocessedCount} unprocessed sessions`, unprocessedCount };
}

lib/auth-service.ts

@ -0,0 +1,7 @@
import { prisma } from "./prisma";
export async function findUserByEmail(email: string) {
return prisma.user.findUnique({
where: { email },
});
}


@ -1,41 +1,232 @@
-// Simplified CSV fetcher - fetches and parses CSV data without any processing
-// Maps directly to SessionImport table fields
+// Fetches, parses, and returns chat session data for a company from a CSV URL
import fetch from "node-fetch";
import { parse } from "csv-parse/sync";
+import ISO6391 from "iso-639-1";
+import countries from "i18n-iso-countries";
+
+// Register locales for i18n-iso-countries
+import enLocale from "i18n-iso-countries/langs/en.json" with { type: "json" };
+countries.registerLocale(enLocale);
-// Raw CSV data interface matching SessionImport schema
-interface RawSessionImport {
-  externalSessionId: string;
-  startTimeRaw: string;
-  endTimeRaw: string;
-  ipAddress: string | null;
-  countryCode: string | null;
-  language: string | null;
-  messagesSent: number | null;
-  sentimentRaw: string | null;
-  escalatedRaw: string | null;
-  forwardedHrRaw: string | null;
-  fullTranscriptUrl: string | null;
-  avgResponseTimeSeconds: number | null;
-  tokens: number | null;
-  tokensEur: number | null;
-  category: string | null;
-  initialMessage: string | null;
-}
+// This type is used internally for parsing the CSV records
+interface CSVRecord {
+  session_id: string;
+  start_time: string;
+  end_time?: string;
+  ip_address?: string;
+  country?: string;
+  language?: string;
+  messages_sent?: string;
+  sentiment?: string;
+  escalated?: string;
+  forwarded_hr?: string;
+  full_transcript_url?: string;
+  avg_response_time?: string;
+  tokens?: string;
+  tokens_eur?: string;
+  category?: string;
+  initial_msg?: string;
+  [key: string]: string | undefined;
+}
interface SessionData {
id: string;
sessionId: string;
startTime: Date;
endTime: Date | null;
ipAddress?: string;
country?: string | null;
language?: string | null;
messagesSent: number;
sentiment?: string | null;
escalated: boolean;
forwardedHr: boolean;
fullTranscriptUrl?: string | null;
avgResponseTime: number | null;
tokens: number;
tokensEur: number;
category?: string | null;
initialMsg?: string;
}
-/**
- * Fetches and parses CSV data from a URL without any processing
- * Maps CSV columns by position to SessionImport fields
- * @param url The CSV URL
- * @param username Optional username for authentication
- * @param password Optional password for authentication
- * @returns Array of raw session import data
- */
+/**
+ * Passes through country data as-is (no mapping)
+ * @param countryStr Raw country string from CSV
+ * @returns The country string as-is or null if empty
+ */
+function getCountryCode(countryStr?: string): string | null | undefined {
+  if (countryStr === undefined) return undefined;
+  if (countryStr === null || countryStr === "") return null;
+  const normalized = countryStr.trim();
+  return normalized || null;
+}
/**
* Converts language names to ISO 639-1 codes
* @param languageStr Raw language string from CSV
* @returns ISO 639-1 language code or null if not found
*/
function getLanguageCode(languageStr?: string): string | null | undefined {
if (languageStr === undefined) return undefined;
if (languageStr === null || languageStr === "") return null;
// Clean the input
const normalized = languageStr.trim();
if (!normalized) return null;
// Direct ISO code check (if already a 2-letter code)
if (normalized.length === 2 && normalized === normalized.toLowerCase()) {
return ISO6391.validate(normalized) ? normalized : null;
}
// Special case mappings
const languageMapping: Record<string, string> = {
english: "en",
English: "en",
dutch: "nl",
Dutch: "nl",
nederlands: "nl",
Nederlands: "nl",
nl: "nl",
bosnian: "bs",
Bosnian: "bs",
turkish: "tr",
Turkish: "tr",
german: "de",
German: "de",
deutsch: "de",
Deutsch: "de",
french: "fr",
French: "fr",
français: "fr",
Français: "fr",
spanish: "es",
Spanish: "es",
español: "es",
Español: "es",
italian: "it",
Italian: "it",
italiano: "it",
Italiano: "it",
nizozemski: "nl", // "Dutch" in some Slavic languages
};
// Check mapping
if (normalized in languageMapping) {
return languageMapping[normalized];
}
// Try to get code using the ISO6391 library
try {
const code = ISO6391.getCode(normalized);
if (code) return code;
} catch (error) {
process.stderr.write(
`[CSV] Error converting language name to code: ${normalized} - ${error}\n`
);
}
// If all else fails, return null
return null;
}
/**
* Passes through category data as-is (no mapping)
* @param categoryStr The raw category string from CSV
* @returns The category string as-is or null if empty
*/
function normalizeCategory(categoryStr?: string): string | null {
if (!categoryStr) return null;
const normalized = categoryStr.trim();
return normalized || null;
}
/**
* Checks if a string value should be considered as boolean true
* @param value The string value to check
* @returns True if the string indicates a positive/true value
*/
function isTruthyValue(value?: string): boolean {
if (!value) return false;
const truthyValues = [
"1",
"true",
"yes",
"y",
"ja",
"si",
"oui",
"да",
"да",
"はい",
];
return truthyValues.includes(value.toLowerCase());
}
/**
* Safely parses a date string into a Date object.
* Handles potential errors and various formats, prioritizing D-M-YYYY HH:MM:SS.
* @param dateStr The date string to parse.
* @returns A Date object or null if parsing fails.
*/
function safeParseDate(dateStr?: string): Date | null {
if (!dateStr) return null;
// Try to parse D-M-YYYY HH:MM:SS format (with hyphens or dots)
const dateTimeRegex =
/^(\d{1,2})[.-](\d{1,2})[.-](\d{4}) (\d{1,2}):(\d{1,2}):(\d{1,2})$/;
const match = dateStr.match(dateTimeRegex);
if (match) {
const day = match[1];
const month = match[2];
const year = match[3];
const hour = match[4];
const minute = match[5];
const second = match[6];
// Reformat to YYYY-MM-DDTHH:MM:SS (ISO-like, but local time)
// Ensure month and day are two digits
const formattedDateStr = `${year}-${month.padStart(2, "0")}-${day.padStart(2, "0")}T${hour.padStart(2, "0")}:${minute.padStart(2, "0")}:${second.padStart(2, "0")}`;
try {
const date = new Date(formattedDateStr);
// Basic validation: check if the constructed date is valid
if (!isNaN(date.getTime())) {
// console.log(`[safeParseDate] Parsed from D-M-YYYY: ${dateStr} -> ${formattedDateStr} -> ${date.toISOString()}`);
return date;
}
} catch (e) {
console.warn(
`[safeParseDate] Error parsing reformatted string ${formattedDateStr} from ${dateStr}:`,
e
);
}
}
// Fallback for other potential formats (e.g., direct ISO 8601) or if the primary parse failed
try {
const parsedDate = new Date(dateStr);
if (!isNaN(parsedDate.getTime())) {
// console.log(`[safeParseDate] Parsed with fallback: ${dateStr} -> ${parsedDate.toISOString()}`);
return parsedDate;
}
} catch (e) {
console.warn(`[safeParseDate] Error parsing with fallback ${dateStr}:`, e);
}
console.warn(`Failed to parse date string: ${dateStr}`);
return null;
}
export async function fetchAndParseCsv(
  url: string,
  username?: string,
  password?: string
-): Promise<RawSessionImport[]> {
+): Promise<Partial<SessionData>[]> {
  const authHeader =
    username && password
      ? "Basic " + Buffer.from(`${username}:${password}`).toString("base64")
@ -44,39 +235,56 @@ export async function fetchAndParseCsv(
  const res = await fetch(url, {
    headers: authHeader ? { Authorization: authHeader } : {},
  });
-  if (!res.ok) throw new Error("Failed to fetch CSV: " + res.statusText);
+  if (!res.ok) {
+    throw new Error(`Failed to fetch CSV: ${res.status} ${res.statusText}`);
+  }
  const text = await res.text();
-  // Parse CSV without headers, using positional column mapping
-  const records: string[][] = parse(text, {
+  // Parse without expecting headers, using known order
+  const records: CSVRecord[] = parse(text, {
    delimiter: ",",
-    from_line: 1, // Start from first line (no headers)
+    columns: [
+      "session_id",
+      "start_time",
+      "end_time",
+      "ip_address",
+      "country",
+      "language",
+      "messages_sent",
+      "sentiment",
+      "escalated",
+      "forwarded_hr",
+      "full_transcript_url",
+      "avg_response_time",
+      "tokens",
+      "tokens_eur",
+      "category",
+      "initial_msg",
+    ],
+    from_line: 1,
    relax_column_count: true,
    skip_empty_lines: true,
    trim: true,
  });
-  // Map CSV columns by position to SessionImport fields
-  return records.map((row) => ({
-    externalSessionId: row[0] || "",
-    startTimeRaw: row[1] || "",
-    endTimeRaw: row[2] || "",
-    ipAddress: row[3] || null,
-    countryCode: row[4] || null,
-    language: row[5] || null,
-    messagesSent: row[6] ? parseInt(row[6], 10) || null : null,
-    sentimentRaw: row[7] || null,
-    escalatedRaw: row[8] || null,
-    forwardedHrRaw: row[9] || null,
-    fullTranscriptUrl: row[10] || null,
-    avgResponseTimeSeconds: row[11] ? parseFloat(row[11]) || null : null,
-    tokens: row[12] ? parseInt(row[12], 10) || null : null,
-    tokensEur: row[13] ? parseFloat(row[13]) || null : null,
-    category: row[14] || null,
-    initialMessage: row[15] || null,
-  }));
+  // Coerce types for relevant columns
+  return records.map((r) => ({
+    id: r.session_id,
+    startTime: safeParseDate(r.start_time) || new Date(), // Fallback to current date if invalid
+    endTime: safeParseDate(r.end_time),
+    ipAddress: r.ip_address,
+    country: getCountryCode(r.country),
+    language: getLanguageCode(r.language),
+    messagesSent: Number(r.messages_sent) || 0,
+    sentiment: r.sentiment,
+    escalated: isTruthyValue(r.escalated),
+    forwardedHr: isTruthyValue(r.forwarded_hr),
+    fullTranscriptUrl: r.full_transcript_url,
+    avgResponseTime: r.avg_response_time
+      ? parseFloat(r.avg_response_time)
+      : null,
+    tokens: Number(r.tokens) || 0,
+    tokensEur: r.tokens_eur ? parseFloat(r.tokens_eur) : 0,
+    category: normalizeCategory(r.category),
+    initialMsg: r.initial_msg,
+  }));
}
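A usage sketch for the new `fetchAndParseCsv`; the import path, credentials, and demo URL are placeholders taken from elsewhere in this change, not part of the file above:

```typescript
import { fetchAndParseCsv } from "./lib/csvFetcher";

// Credentials would normally come from the company record in the database.
const rows = await fetchAndParseCsv(
  "https://proto.notso.ai/jumbo/chats", // demo CSV URL referenced in the docs above
  process.env.CSV_USERNAME, // placeholder env vars for illustration
  process.env.CSV_PASSWORD
);
console.log(`Fetched ${rows.length} rows; first start time:`, rows[0]?.startTime);
```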

lib/data-service.ts

@ -0,0 +1,332 @@
import { prisma } from "./prisma";
// Example: Function to get a user by ID
export async function getUserById(id: string) {
return prisma.user.findUnique({ where: { id } });
}
export async function getCompanyByUserId(userId: string) {
const user = await prisma.user.findUnique({
where: { id: userId },
});
if (!user) return null;
return prisma.company.findUnique({
where: { id: user.companyId },
});
}
export async function updateCompanyCsvUrl(companyId: string, csvUrl: string) {
return prisma.company.update({
where: { id: companyId },
data: { csvUrl },
});
}
export async function findUserByEmailWithCompany(email: string) {
return prisma.user.findUnique({
where: { email },
include: { company: true },
});
}
export async function findSessionsByCompanyIdAndDateRange(companyId: string, startDate?: string, endDate?: string) {
const whereClause: any = {
companyId,
processed: true,
};
if (startDate && endDate) {
whereClause.startTime = {
gte: new Date(startDate),
lte: new Date(endDate + "T23:59:59.999Z"),
};
}
return prisma.session.findMany({
where: whereClause,
include: {
messages: true,
},
});
}
export async function getDistinctSessionCategories(companyId: string) {
const categories = await prisma.session.findMany({
where: {
companyId,
category: {
not: null,
},
},
distinct: ["category"],
select: {
category: true,
},
orderBy: {
category: "asc",
},
});
return categories.map((s) => s.category).filter(Boolean) as string[];
}
export async function getDistinctSessionLanguages(companyId: string) {
const languages = await prisma.session.findMany({
where: {
companyId,
language: {
not: null,
},
},
distinct: ["language"],
select: {
language: true,
},
orderBy: {
language: "asc",
},
});
return languages.map((s) => s.language).filter(Boolean) as string[];
}
export async function getSessionById(id: string) {
return prisma.session.findUnique({
where: { id },
include: {
messages: {
orderBy: { order: "asc" },
},
},
});
}
export async function getFilteredAndPaginatedSessions(
companyId: string,
searchTerm: string | null,
category: string | null,
language: string | null,
startDate: string | null,
endDate: string | null,
sortKey: string | null,
sortOrder: string | null,
page: number,
pageSize: number
) {
const whereClause: Prisma.SessionWhereInput = { companyId };
// Search Term
if (
searchTerm &&
typeof searchTerm === "string" &&
searchTerm.trim() !== ""
) {
const searchConditions = [
{ id: { contains: searchTerm } },
{ category: { contains: searchTerm } },
{ initialMsg: { contains: searchTerm } },
];
whereClause.OR = searchConditions;
}
// Category Filter
if (category && typeof category === "string" && category.trim() !== "") {
whereClause.category = category;
}
// Language Filter
if (language && typeof language === "string" && language.trim() !== "") {
whereClause.language = language;
}
// Date Range Filter
if (startDate && typeof startDate === "string") {
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
gte: new Date(startDate),
};
}
if (endDate && typeof endDate === "string") {
const inclusiveEndDate = new Date(endDate);
inclusiveEndDate.setDate(inclusiveEndDate.getDate() + 1);
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
lt: inclusiveEndDate,
};
}
// Sorting
const validSortKeys: { [key: string]: string } = {
startTime: "startTime",
category: "category",
language: "language",
sentiment: "sentiment",
messagesSent: "messagesSent",
avgResponseTime: "avgResponseTime",
};
let orderByCondition:
| Prisma.SessionOrderByWithRelationInput
| Prisma.SessionOrderByWithRelationInput[];
const primarySortField =
sortKey && typeof sortKey === "string" && validSortKeys[sortKey]
? validSortKeys[sortKey]
: "startTime"; // Default to startTime field if sortKey is invalid/missing
const primarySortOrder =
sortOrder === "asc" || sortOrder === "desc" ? sortOrder : "desc"; // Default to desc order
if (primarySortField === "startTime") {
// If sorting by startTime, it's the only sort criteria
orderByCondition = { [primarySortField]: primarySortOrder };
} else {
// If sorting by another field, use startTime: "desc" as secondary sort
orderByCondition = [
{ [primarySortField]: primarySortOrder },
{ startTime: "desc" },
];
}
return prisma.session.findMany({
where: whereClause,
orderBy: orderByCondition,
skip: (page - 1) * pageSize,
take: pageSize,
});
}
export async function countFilteredSessions(
companyId: string,
searchTerm: string | null,
category: string | null,
language: string | null,
startDate: string | null,
endDate: string | null
) {
const whereClause: Prisma.SessionWhereInput = { companyId };
// Search Term
if (
searchTerm &&
typeof searchTerm === "string" &&
searchTerm.trim() !== ""
) {
const searchConditions = [
{ id: { contains: searchTerm } },
{ category: { contains: searchTerm } },
{ initialMsg: { contains: searchTerm } },
];
whereClause.OR = searchConditions;
}
// Category Filter
if (category && typeof category === "string" && category.trim() !== "") {
whereClause.category = category;
}
// Language Filter
if (language && typeof language === "string" && language.trim() !== "") {
whereClause.language = language;
}
// Date Range Filter
if (startDate && typeof startDate === "string") {
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
gte: new Date(startDate),
};
}
if (endDate && typeof endDate === "string") {
const inclusiveEndDate = new Date(endDate);
inclusiveEndDate.setDate(inclusiveEndDate.getDate() + 1);
whereClause.startTime = {
...((whereClause.startTime as object) || {}),
lt: inclusiveEndDate,
};
}
return prisma.session.count({ where: whereClause });
}
export async function updateCompanySettings(
companyId: string,
data: {
csvUrl?: string;
csvUsername?: string;
csvPassword?: string;
sentimentAlert?: number | null;
}
) {
return prisma.company.update({
where: { id: companyId },
data,
});
}
export async function getUsersByCompanyId(companyId: string) {
return prisma.user.findMany({
where: { companyId },
});
}
export async function userExistsByEmail(email: string) {
return prisma.user.findUnique({ where: { email } });
}
export async function createUser(email: string, passwordHash: string, companyId: string, role: string) {
return prisma.user.create({
data: {
email,
password: passwordHash,
companyId,
role,
},
});
}
export async function updateUserResetToken(email: string, token: string, expiry: Date) {
return prisma.user.update({
where: { email },
data: { resetToken: token, resetTokenExpiry: expiry },
});
}
export async function createCompany(name: string, csvUrl: string) {
return prisma.company.create({
data: { name, csvUrl },
});
}
export async function findUserByResetToken(token: string) {
return prisma.user.findFirst({
where: {
resetToken: token,
resetTokenExpiry: { gte: new Date() },
},
});
}
export async function updateUserPasswordAndResetToken(userId: string, passwordHash: string) {
return prisma.user.update({
where: { id: userId },
data: {
password: passwordHash,
resetToken: null,
resetTokenExpiry: null,
},
});
}
// Add more data fetching functions here as needed
import { Prisma } from "@prisma/client";
export async function getSessionByCompanyId(where: Prisma.SessionWhereInput) {
return prisma.session.findFirst({
orderBy: { createdAt: "desc" },
where,
});
}
export async function getCompanyById(companyId: string) {
return prisma.company.findUnique({ where: { id: companyId } });
}


@ -1,147 +0,0 @@
// Centralized environment variable management
import { readFileSync } from "fs";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
/**
* Parse environment variable value by removing quotes, comments, and trimming whitespace
*/
function parseEnvValue(value: string | undefined): string {
if (!value) return '';
// Trim whitespace
let cleaned = value.trim();
// Remove inline comments (everything after #)
const commentIndex = cleaned.indexOf('#');
if (commentIndex !== -1) {
cleaned = cleaned.substring(0, commentIndex).trim();
}
// Remove surrounding quotes (both single and double)
if ((cleaned.startsWith('"') && cleaned.endsWith('"')) ||
(cleaned.startsWith("'") && cleaned.endsWith("'"))) {
cleaned = cleaned.slice(1, -1);
}
return cleaned;
}
/**
* Parse integer with fallback to default value
*/
function parseIntWithDefault(value: string | undefined, defaultValue: number): number {
const cleaned = parseEnvValue(value);
if (!cleaned) return defaultValue;
const parsed = parseInt(cleaned, 10);
return isNaN(parsed) ? defaultValue : parsed;
}
// Load environment variables from .env.local
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const envPath = join(__dirname, '..', '.env.local');
// Load .env.local if it exists
try {
const envFile = readFileSync(envPath, 'utf8');
const envVars = envFile.split('\n').filter(line => line.trim() && !line.startsWith('#'));
envVars.forEach(line => {
const [key, ...valueParts] = line.split('=');
if (key && valueParts.length > 0) {
const rawValue = valueParts.join('=');
const cleanedValue = parseEnvValue(rawValue);
if (!process.env[key.trim()]) {
process.env[key.trim()] = cleanedValue;
}
}
});
} catch (error) {
// Silently fail if .env.local doesn't exist
}
/**
* Typed environment variables with defaults
*/
export const env = {
// NextAuth
NEXTAUTH_URL: parseEnvValue(process.env.NEXTAUTH_URL) || 'http://localhost:3000',
NEXTAUTH_SECRET: parseEnvValue(process.env.NEXTAUTH_SECRET) || '',
NODE_ENV: parseEnvValue(process.env.NODE_ENV) || 'development',
// OpenAI
OPENAI_API_KEY: parseEnvValue(process.env.OPENAI_API_KEY) || '',
// Scheduler Configuration
SCHEDULER_ENABLED: parseEnvValue(process.env.SCHEDULER_ENABLED) === 'true',
CSV_IMPORT_INTERVAL: parseEnvValue(process.env.CSV_IMPORT_INTERVAL) || '*/15 * * * *',
IMPORT_PROCESSING_INTERVAL: parseEnvValue(process.env.IMPORT_PROCESSING_INTERVAL) || '*/5 * * * *',
IMPORT_PROCESSING_BATCH_SIZE: parseIntWithDefault(process.env.IMPORT_PROCESSING_BATCH_SIZE, 50),
SESSION_PROCESSING_INTERVAL: parseEnvValue(process.env.SESSION_PROCESSING_INTERVAL) || '0 * * * *',
SESSION_PROCESSING_BATCH_SIZE: parseIntWithDefault(process.env.SESSION_PROCESSING_BATCH_SIZE, 0),
SESSION_PROCESSING_CONCURRENCY: parseIntWithDefault(process.env.SESSION_PROCESSING_CONCURRENCY, 5),
// Server
PORT: parseIntWithDefault(process.env.PORT, 3000),
} as const;
/**
* Validate required environment variables
*/
export function validateEnv(): { valid: boolean; errors: string[] } {
const errors: string[] = [];
if (!env.NEXTAUTH_SECRET) {
errors.push('NEXTAUTH_SECRET is required');
}
if (!env.OPENAI_API_KEY && env.NODE_ENV === 'production') {
errors.push('OPENAI_API_KEY is required in production');
}
return {
valid: errors.length === 0,
errors,
};
}
/**
* Get scheduler configuration from environment variables
*/
export function getSchedulerConfig() {
return {
enabled: env.SCHEDULER_ENABLED,
csvImport: {
interval: env.CSV_IMPORT_INTERVAL,
},
importProcessing: {
interval: env.IMPORT_PROCESSING_INTERVAL,
batchSize: env.IMPORT_PROCESSING_BATCH_SIZE,
},
sessionProcessing: {
interval: env.SESSION_PROCESSING_INTERVAL,
batchSize: env.SESSION_PROCESSING_BATCH_SIZE,
concurrency: env.SESSION_PROCESSING_CONCURRENCY,
},
};
}
/**
* Log environment configuration (safe for production)
*/
export function logEnvConfig(): void {
console.log('[Environment] Configuration:');
console.log(` NODE_ENV: ${env.NODE_ENV}`);
console.log(` NEXTAUTH_URL: ${env.NEXTAUTH_URL}`);
console.log(` SCHEDULER_ENABLED: ${env.SCHEDULER_ENABLED}`);
console.log(` PORT: ${env.PORT}`);
if (env.SCHEDULER_ENABLED) {
console.log(' Scheduler intervals:');
console.log(` CSV Import: ${env.CSV_IMPORT_INTERVAL}`);
console.log(` Import Processing: ${env.IMPORT_PROCESSING_INTERVAL}`);
console.log(` Session Processing: ${env.SESSION_PROCESSING_INTERVAL}`);
}
}


@ -1,357 +0,0 @@
// SessionImport to Session processor
import { PrismaClient, SentimentCategory, SessionCategory, ProcessingStage } from "@prisma/client";
import { getSchedulerConfig } from "./env";
import { fetchTranscriptContent, isValidTranscriptUrl } from "./transcriptFetcher";
import { ProcessingStatusManager } from "./processingStatusManager";
import cron from "node-cron";
const prisma = new PrismaClient();
/**
* Parse European date format (DD.MM.YYYY HH:mm:ss) to JavaScript Date
*/
function parseEuropeanDate(dateStr: string): Date {
if (!dateStr || typeof dateStr !== 'string') {
throw new Error(`Invalid date string: ${dateStr}`);
}
// Handle format: "DD.MM.YYYY HH:mm:ss"
const [datePart, timePart] = dateStr.trim().split(' ');
if (!datePart || !timePart) {
throw new Error(`Invalid date format: ${dateStr}. Expected format: DD.MM.YYYY HH:mm:ss`);
}
const [day, month, year] = datePart.split('.');
if (!day || !month || !year) {
throw new Error(`Invalid date part: ${datePart}. Expected format: DD.MM.YYYY`);
}
// Convert to ISO format: YYYY-MM-DD HH:mm:ss
const isoDateStr = `${year}-${month.padStart(2, '0')}-${day.padStart(2, '0')} ${timePart}`;
const date = new Date(isoDateStr);
if (isNaN(date.getTime())) {
throw new Error(`Failed to parse date: ${dateStr} -> ${isoDateStr}`);
}
return date;
}
/**
* Helper function to parse sentiment from raw string (fallback only)
*/
function parseFallbackSentiment(sentimentRaw: string | null): SentimentCategory | null {
if (!sentimentRaw) return null;
const sentimentStr = sentimentRaw.toLowerCase();
if (sentimentStr.includes('positive')) {
return SentimentCategory.POSITIVE;
} else if (sentimentStr.includes('negative')) {
return SentimentCategory.NEGATIVE;
} else {
return SentimentCategory.NEUTRAL;
}
}
/**
* Helper function to parse boolean from raw string (fallback only)
*/
function parseFallbackBoolean(rawValue: string | null): boolean | null {
if (!rawValue) return null;
return ['true', '1', 'yes', 'escalated', 'forwarded'].includes(rawValue.toLowerCase());
}
/**
* Parse transcript content into Message records
*/
async function parseTranscriptIntoMessages(sessionId: string, transcriptContent: string): Promise<void> {
// Clear existing messages for this session
await prisma.message.deleteMany({
where: { sessionId }
});
// Split transcript into lines and parse each message
const lines = transcriptContent.split('\n').filter(line => line.trim());
let order = 0;
for (const line of lines) {
const trimmedLine = line.trim();
if (!trimmedLine) continue;
// Try to parse different formats:
// Format 1: "User: message" or "Assistant: message"
// Format 2: "[timestamp] User: message" or "[timestamp] Assistant: message"
let role = 'unknown';
let content = trimmedLine;
let timestamp: Date | null = null;
// Check for timestamp format: [DD.MM.YYYY HH:mm:ss] Role: content
const timestampMatch = trimmedLine.match(/^\[([^\]]+)\]\s*(.+)$/);
if (timestampMatch) {
try {
timestamp = parseEuropeanDate(timestampMatch[1]);
content = timestampMatch[2];
} catch (error) {
// If timestamp parsing fails, treat the whole line as content
content = trimmedLine;
}
}
// Extract role and message content
const roleMatch = content.match(/^(User|Assistant|System):\s*(.*)$/i);
if (roleMatch) {
role = roleMatch[1].toLowerCase();
content = roleMatch[2].trim();
} else {
// If no role prefix found, try to infer from context or use 'unknown'
role = 'unknown';
}
// Skip empty content
if (!content) continue;
// Create message record
await prisma.message.create({
data: {
sessionId,
timestamp,
role,
content,
order,
},
});
order++;
}
console.log(`[Import Processor] ✓ Parsed ${order} messages for session ${sessionId}`);
}
/**
* Process a single SessionImport record into a Session record
* Uses new unified processing status tracking
*/
async function processSingleImport(importRecord: any): Promise<{ success: boolean; error?: string }> {
let sessionId: string | null = null;
try {
// Parse dates using European format parser
const startTime = parseEuropeanDate(importRecord.startTimeRaw);
const endTime = parseEuropeanDate(importRecord.endTimeRaw);
console.log(`[Import Processor] Processing ${importRecord.externalSessionId}: ${startTime.toISOString()} - ${endTime.toISOString()}`);
// Create or update Session record with MINIMAL processing
const session = await prisma.session.upsert({
where: {
importId: importRecord.id,
},
update: {
startTime,
endTime,
// Direct copies (minimal processing)
ipAddress: importRecord.ipAddress,
country: importRecord.countryCode, // Keep as country code
fullTranscriptUrl: importRecord.fullTranscriptUrl,
avgResponseTime: importRecord.avgResponseTimeSeconds,
initialMsg: importRecord.initialMessage,
},
create: {
companyId: importRecord.companyId,
importId: importRecord.id,
startTime,
endTime,
// Direct copies (minimal processing)
ipAddress: importRecord.ipAddress,
country: importRecord.countryCode, // Keep as country code
fullTranscriptUrl: importRecord.fullTranscriptUrl,
avgResponseTime: importRecord.avgResponseTimeSeconds,
initialMsg: importRecord.initialMessage,
},
});
sessionId = session.id;
// Initialize processing status for this session
await ProcessingStatusManager.initializeSession(sessionId);
// Mark CSV_IMPORT as completed
await ProcessingStatusManager.completeStage(sessionId, ProcessingStage.CSV_IMPORT);
// Handle transcript fetching
let transcriptContent = importRecord.rawTranscriptContent;
if (!transcriptContent && importRecord.fullTranscriptUrl && isValidTranscriptUrl(importRecord.fullTranscriptUrl)) {
await ProcessingStatusManager.startStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH);
console.log(`[Import Processor] Fetching transcript for ${importRecord.externalSessionId}...`);
// Get company credentials for transcript fetching
const company = await prisma.company.findUnique({
where: { id: importRecord.companyId },
select: { csvUsername: true, csvPassword: true },
});
const transcriptResult = await fetchTranscriptContent(
importRecord.fullTranscriptUrl,
company?.csvUsername || undefined,
company?.csvPassword || undefined
);
if (transcriptResult.success) {
transcriptContent = transcriptResult.content;
console.log(`[Import Processor] ✓ Fetched transcript for ${importRecord.externalSessionId} (${transcriptContent?.length} chars)`);
// Update the import record with the fetched content
await prisma.sessionImport.update({
where: { id: importRecord.id },
data: { rawTranscriptContent: transcriptContent },
});
await ProcessingStatusManager.completeStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH, {
contentLength: transcriptContent?.length || 0,
url: importRecord.fullTranscriptUrl
});
} else {
console.log(`[Import Processor] ⚠️ Failed to fetch transcript for ${importRecord.externalSessionId}: ${transcriptResult.error}`);
await ProcessingStatusManager.failStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH, transcriptResult.error || 'Unknown error');
}
} else if (!importRecord.fullTranscriptUrl) {
// No transcript URL available - skip this stage
await ProcessingStatusManager.skipStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH, 'No transcript URL provided');
} else {
// Transcript already fetched
await ProcessingStatusManager.completeStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH, {
contentLength: transcriptContent?.length || 0,
source: 'already_fetched'
});
}
// Handle session creation (parse messages)
await ProcessingStatusManager.startStage(sessionId, ProcessingStage.SESSION_CREATION);
if (transcriptContent) {
await parseTranscriptIntoMessages(sessionId, transcriptContent);
}
await ProcessingStatusManager.completeStage(sessionId, ProcessingStage.SESSION_CREATION, {
hasTranscript: !!transcriptContent,
transcriptLength: transcriptContent?.length || 0
});
return { success: true };
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
// Mark the current stage as failed if we have a sessionId
if (sessionId) {
// Determine which stage failed based on the error
if (errorMessage.includes('transcript') || errorMessage.includes('fetch')) {
await ProcessingStatusManager.failStage(sessionId, ProcessingStage.TRANSCRIPT_FETCH, errorMessage);
} else if (errorMessage.includes('message') || errorMessage.includes('parse')) {
await ProcessingStatusManager.failStage(sessionId, ProcessingStage.SESSION_CREATION, errorMessage);
} else {
// General failure - mark CSV_IMPORT as failed
await ProcessingStatusManager.failStage(sessionId, ProcessingStage.CSV_IMPORT, errorMessage);
}
}
return {
success: false,
error: errorMessage,
};
}
}
/**
* Process unprocessed SessionImport records into Session records
* Uses new processing status system to find imports that need processing
*/
export async function processQueuedImports(batchSize: number = 50): Promise<void> {
console.log('[Import Processor] Starting to process unprocessed imports...');
let totalSuccessCount = 0;
let totalErrorCount = 0;
let batchNumber = 1;
while (true) {
// Find SessionImports that don't have a corresponding Session yet
const unprocessedImports = await prisma.sessionImport.findMany({
where: {
session: null, // No session created yet
},
take: batchSize,
orderBy: {
createdAt: 'asc', // Process oldest first
},
});
if (unprocessedImports.length === 0) {
if (batchNumber === 1) {
console.log('[Import Processor] No unprocessed imports found');
} else {
console.log(`[Import Processor] All batches completed. Total: ${totalSuccessCount} successful, ${totalErrorCount} failed`);
}
return;
}
console.log(`[Import Processor] Processing batch ${batchNumber}: ${unprocessedImports.length} imports...`);
let batchSuccessCount = 0;
let batchErrorCount = 0;
// Process each import in this batch
for (const importRecord of unprocessedImports) {
const result = await processSingleImport(importRecord);
if (result.success) {
batchSuccessCount++;
totalSuccessCount++;
console.log(`[Import Processor] ✓ Processed import ${importRecord.externalSessionId}`);
} else {
batchErrorCount++;
totalErrorCount++;
console.log(`[Import Processor] ✗ Failed to process import ${importRecord.externalSessionId}: ${result.error}`);
}
}
console.log(`[Import Processor] Batch ${batchNumber} completed: ${batchSuccessCount} successful, ${batchErrorCount} failed`);
batchNumber++;
// If this batch was smaller than the batch size, we're done
if (unprocessedImports.length < batchSize) {
console.log(`[Import Processor] All batches completed. Total: ${totalSuccessCount} successful, ${totalErrorCount} failed`);
return;
}
}
}
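// Illustrative sketch (not from the original source): the processor can also be run outside the
// cron scheduler, e.g. from a maintenance script or an admin endpoint. The batch size of 100 is
// arbitrary for the example; the function loops internally until no unprocessed imports remain.
async function runImportProcessingOnce(): Promise<void> {
  await processQueuedImports(100);
}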
/**
* Start the import processing scheduler
*/
export function startImportProcessingScheduler(): void {
const config = getSchedulerConfig();
if (!config.enabled) {
console.log('[Import Processing Scheduler] Disabled via configuration');
return;
}
// Use a more frequent interval for import processing (every 5 minutes by default)
const interval = process.env.IMPORT_PROCESSING_INTERVAL || '*/5 * * * *';
const batchSize = parseInt(process.env.IMPORT_PROCESSING_BATCH_SIZE || '50', 10);
console.log(`[Import Processing Scheduler] Starting with interval: ${interval}`);
console.log(`[Import Processing Scheduler] Batch size: ${batchSize}`);
cron.schedule(interval, async () => {
try {
await processQueuedImports(batchSize);
} catch (error) {
console.error(`[Import Processing Scheduler] Error: ${error}`);
}
});
}

View File

@@ -325,7 +325,16 @@ export function sessionMetrics(
   sessions: ChatSession[],
   companyConfig: CompanyConfig = {}
 ): MetricsResult {
-  const totalSessions = sessions.length; // Renamed from 'total' for clarity
+  // Filter out invalid data sessions for analytics
+  const validSessions = sessions.filter(session => {
+    // Include sessions that are either:
+    // 1. Not processed yet (validData field doesn't exist or is undefined)
+    // 2. Processed and marked as valid (validData === true)
+    return session.validData !== false;
+  });
+  const totalSessions = validSessions.length; // Only count valid sessions
+  const totalRawSessions = sessions.length; // Keep track of all sessions for debugging
+
   const byDay: DayMetrics = {};
   const byCategory: CategoryMetrics = {};
   const byLanguage: LanguageMetrics = {};
@@ -345,21 +354,21 @@ export function sessionMetrics(
   let sentimentPositiveCount = 0;
   let sentimentNeutralCount = 0;
   let sentimentNegativeCount = 0;
-  const totalTokens = 0;
-  const totalTokensEur = 0;
+  let totalTokens = 0;
+  let totalTokensEur = 0;
   const wordCounts: { [key: string]: number } = {};
-  let alerts = 0;
+  const alerts = 0;
 
   // New metrics variables
   const hourlySessionCounts: { [hour: string]: number } = {};
   let resolvedChatsCount = 0;
   const questionCounts: { [question: string]: number } = {};
 
   for (const session of sessions) {
     // Track hourly usage for peak time calculation
     if (session.startTime) {
       const hour = new Date(session.startTime).getHours();
-      const hourKey = `${hour.toString().padStart(2, '0')}:00`;
+      const hourKey = `${hour.toString().padStart(2, "0")}:00`;
       hourlySessionCounts[hourKey] = (hourlySessionCounts[hourKey] || 0) + 1;
     }
@@ -453,25 +462,34 @@ export function sessionMetrics(
     if (session.escalated) escalatedCount++;
     if (session.forwardedHr) forwardedHrCount++;
 
-    // Sentiment (now using enum values)
-    if (session.sentiment !== undefined && session.sentiment !== null) {
-      if (session.sentiment === "POSITIVE") sentimentPositiveCount++;
-      else if (session.sentiment === "NEGATIVE") sentimentNegativeCount++;
-      else if (session.sentiment === "NEUTRAL") sentimentNeutralCount++;
+    // Sentiment
+    if (session.sentiment === "positive") {
+      sentimentPositiveCount++;
+    } else if (session.sentiment === "neutral") {
+      sentimentNeutralCount++;
+    } else if (session.sentiment === "negative") {
+      sentimentNegativeCount++;
     }
 
-    // Sentiment Alert Check (simplified for enum)
-    if (
-      companyConfig.sentimentAlert !== undefined &&
-      session.sentiment === "NEGATIVE"
-    ) {
-      alerts++;
+    // Tokens
+    if (session.tokens !== undefined && session.tokens !== null) {
+      totalTokens += session.tokens;
+    }
+    if (session.tokensEur !== undefined && session.tokensEur !== null) {
+      totalTokensEur += session.tokensEur;
     }
 
     // Daily metrics
     const day = new Date(session.startTime).toISOString().split("T")[0];
     byDay[day] = (byDay[day] || 0) + 1; // Sessions per day
-    // Note: tokens and tokensEur are not available in the new schema
+    if (session.tokens !== undefined && session.tokens !== null) {
+      tokensByDay[day] = (tokensByDay[day] || 0) + session.tokens;
+    }
+    if (session.tokensEur !== undefined && session.tokensEur !== null) {
+      tokensCostByDay[day] = (tokensCostByDay[day] || 0) + session.tokensEur;
+    }
 
     // Category metrics
     if (session.category) {
@@ -490,15 +508,39 @@ export function sessionMetrics(
     // Extract questions from session
     const extractQuestions = () => {
-      // 1. Extract questions from user messages (if available)
+      // 1. Extract from questions JSON field
+      if (session.questions) {
+        try {
+          const questionsArray = JSON.parse(session.questions);
+          if (Array.isArray(questionsArray)) {
+            questionsArray.forEach((question: string) => {
+              if (question && question.trim().length > 0) {
+                const cleanQuestion = question.trim();
+                questionCounts[cleanQuestion] =
+                  (questionCounts[cleanQuestion] || 0) + 1;
+              }
+            });
+          }
+        } catch (error) {
+          console.warn(
+            `[metrics] Failed to parse questions JSON for session ${session.id}: ${error}`
+          );
+        }
+      }
+
+      // 2. Extract questions from user messages (if available)
       if (session.messages) {
         session.messages
-          .filter(msg => msg.role === 'User')
-          .forEach(msg => {
+          .filter((msg) => msg.role === "User")
+          .forEach((msg) => {
             const content = msg.content.trim();
             // Simple heuristic: if message ends with ? or contains question words, treat as question
-            if (content.endsWith('?') ||
-              /\b(what|when|where|why|how|who|which|can|could|would|will|is|are|do|does|did)\b/i.test(content)) {
+            if (
+              content.endsWith("?") ||
+              /\b(what|when|where|why|how|who|which|can|could|would|will|is|are|do|does|did)\b/i.test(
+                content
+              )
+            ) {
               questionCounts[content] = (questionCounts[content] || 0) + 1;
             }
           });
@@ -507,8 +549,12 @@ export function sessionMetrics(
       // 3. Extract questions from initial message as fallback
       if (session.initialMsg) {
         const content = session.initialMsg.trim();
-        if (content.endsWith('?') ||
-          /\b(what|when|where|why|how|who|which|can|could|would|will|is|are|do|does|did)\b/i.test(content)) {
+        if (
+          content.endsWith("?") ||
+          /\b(what|when|where|why|how|who|which|can|could|would|will|is|are|do|does|did)\b/i.test(
+            content
+          )
+        ) {
           questionCounts[content] = (questionCounts[content] || 0) + 1;
         }
       }
@@ -580,20 +626,23 @@ export function sessionMetrics(
   // Calculate new metrics
   // 1. Average Daily Costs (euros)
-  const avgDailyCosts = numDaysWithSessions > 0 ? totalTokensEur / numDaysWithSessions : 0;
+  const avgDailyCosts =
+    numDaysWithSessions > 0 ? totalTokensEur / numDaysWithSessions : 0;
 
   // 2. Peak Usage Time
   let peakUsageTime = "N/A";
   if (Object.keys(hourlySessionCounts).length > 0) {
-    const peakHour = Object.entries(hourlySessionCounts)
-      .sort(([, a], [, b]) => b - a)[0][0];
-    const peakHourNum = parseInt(peakHour.split(':')[0]);
+    const peakHour = Object.entries(hourlySessionCounts).sort(
+      ([, a], [, b]) => b - a
+    )[0][0];
+    const peakHourNum = parseInt(peakHour.split(":")[0]);
     const endHour = (peakHourNum + 1) % 24;
-    peakUsageTime = `${peakHour}-${endHour.toString().padStart(2, '0')}:00`;
+    peakUsageTime = `${peakHour}-${endHour.toString().padStart(2, "0")}:00`;
   }
 
   // 3. Resolved Chats Percentage
-  const resolvedChatsPercentage = totalSessions > 0 ? (resolvedChatsCount / totalSessions) * 100 : 0;
+  const resolvedChatsPercentage =
+    totalSessions > 0 ? (resolvedChatsCount / totalSessions) * 100 : 0;
 
   // 4. Top 5 Asked Questions
   const topQuestions: TopQuestion[] = Object.entries(questionCounts)
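// Illustrative note (not from the original source): the validData filter introduced above keeps
// sessions that are explicitly valid or not yet processed, e.g.
//   [{ validData: true }, { validData: undefined }, { validData: false }]
//     .filter((s) => s.validData !== false)
// keeps the first two entries, so only sessions explicitly flagged invalid drop out of the metrics.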

View File

@@ -1,78 +1,51 @@
-// Enhanced session processing scheduler with AI cost tracking and question management
-import cron from "node-cron";
-import { PrismaClient, SentimentCategory, SessionCategory, ProcessingStage } from "@prisma/client";
+// Session processing scheduler - TypeScript version
+// Note: Disabled due to Next.js compatibility issues
+// import cron from "node-cron";
+import { PrismaClient } from "@prisma/client";
 import fetch from "node-fetch";
-import { getSchedulerConfig } from "./schedulerConfig";
-import { ProcessingStatusManager } from "./processingStatusManager";
+import { readFileSync } from "fs";
+import { fileURLToPath } from "url";
+import { dirname, join } from "path";
+import { VALID_CATEGORIES, ValidCategory, SentimentCategory } from "./types";
+
+// Load environment variables from .env.local
+const __filename = fileURLToPath(import.meta.url);
+const __dirname = dirname(__filename);
+const envPath = join(__dirname, "..", ".env.local");
+
+try {
+  const envFile = readFileSync(envPath, "utf8");
+  const envVars = envFile
+    .split("\n")
+    .filter((line) => line.trim() && !line.startsWith("#"));
+
+  envVars.forEach((line) => {
+    const [key, ...valueParts] = line.split("=");
+    if (key && valueParts.length > 0) {
+      const value = valueParts.join("=").trim();
+      if (!process.env[key.trim()]) {
+        process.env[key.trim()] = value;
+      }
+    }
+  });
+} catch (error) {
+  // Silently fail if .env.local doesn't exist
+}
 
 const prisma = new PrismaClient();
 const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
 const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
-const DEFAULT_MODEL = process.env.OPENAI_MODEL || "gpt-4o";
-const USD_TO_EUR_RATE = 0.85; // Update periodically or fetch from API
-
-/**
- * Get company's default AI model
- */
-async function getCompanyAIModel(companyId: string): Promise<string> {
-  const companyModel = await prisma.companyAIModel.findFirst({
-    where: {
-      companyId,
-      isDefault: true,
-    },
-    include: {
-      aiModel: true,
-    },
-  });
-
-  return companyModel?.aiModel.name || DEFAULT_MODEL;
-}
-
-/**
- * Get current pricing for an AI model
- */
-async function getCurrentModelPricing(modelName: string): Promise<{
-  promptTokenCost: number;
-  completionTokenCost: number;
-} | null> {
-  const model = await prisma.aIModel.findUnique({
-    where: { name: modelName },
-    include: {
-      pricing: {
-        where: {
-          effectiveFrom: { lte: new Date() },
-          OR: [
-            { effectiveUntil: null },
-            { effectiveUntil: { gte: new Date() } }
-          ]
-        },
-        orderBy: { effectiveFrom: 'desc' },
-        take: 1,
-      },
-    },
-  });
-
-  if (!model || model.pricing.length === 0) {
-    return null;
-  }
-
-  const pricing = model.pricing[0];
-  return {
-    promptTokenCost: pricing.promptTokenCost,
-    completionTokenCost: pricing.completionTokenCost,
-  };
-}
 
 interface ProcessedData {
   language: string;
-  sentiment: "POSITIVE" | "NEUTRAL" | "NEGATIVE";
+  sentiment: "positive" | "neutral" | "negative";
   escalated: boolean;
   forwarded_hr: boolean;
-  category: "SCHEDULE_HOURS" | "LEAVE_VACATION" | "SICK_LEAVE_RECOVERY" | "SALARY_COMPENSATION" | "CONTRACT_HOURS" | "ONBOARDING" | "OFFBOARDING" | "WORKWEAR_STAFF_PASS" | "TEAM_CONTACTS" | "PERSONAL_QUESTIONS" | "ACCESS_LOGIN" | "SOCIAL_QUESTIONS" | "UNRECOGNIZED_OTHER";
-  questions: string[];
+  category: ValidCategory;
+  questions: string | string[];
   summary: string;
-  session_id: string;
+  tokens: number;
+  tokens_eur: number;
 }
 
 interface ProcessingResult {
@@ -81,184 +54,53 @@ interface ProcessingResult {
   error?: string;
 }
 
-/**
- * Record AI processing request with detailed token tracking
- */
-async function recordAIProcessingRequest(
-  sessionId: string,
-  openaiResponse: any,
-  processingType: string = 'session_analysis'
-): Promise<void> {
-  const usage = openaiResponse.usage;
-  const model = openaiResponse.model;
-
-  // Get current pricing from database
-  const pricing = await getCurrentModelPricing(model);
-
-  // Fallback pricing if not found in database
-  const fallbackPricing = {
-    promptTokenCost: 0.00001, // $10.00 per 1M tokens (gpt-4-turbo rate)
-    completionTokenCost: 0.00003, // $30.00 per 1M tokens
-  };
-
-  const finalPricing = pricing || fallbackPricing;
-  const promptCost = usage.prompt_tokens * finalPricing.promptTokenCost;
-  const completionCost = usage.completion_tokens * finalPricing.completionTokenCost;
-  const totalCostUsd = promptCost + completionCost;
-  const totalCostEur = totalCostUsd * USD_TO_EUR_RATE;
-
-  await prisma.aIProcessingRequest.create({
-    data: {
-      sessionId,
-      openaiRequestId: openaiResponse.id,
-      model: openaiResponse.model,
-      serviceTier: openaiResponse.service_tier,
-      systemFingerprint: openaiResponse.system_fingerprint,
-      promptTokens: usage.prompt_tokens,
-      completionTokens: usage.completion_tokens,
-      totalTokens: usage.total_tokens,
-      // Detailed breakdown
-      cachedTokens: usage.prompt_tokens_details?.cached_tokens || null,
-      audioTokensPrompt: usage.prompt_tokens_details?.audio_tokens || null,
-      reasoningTokens: usage.completion_tokens_details?.reasoning_tokens || null,
-      audioTokensCompletion: usage.completion_tokens_details?.audio_tokens || null,
-      acceptedPredictionTokens: usage.completion_tokens_details?.accepted_prediction_tokens || null,
-      rejectedPredictionTokens: usage.completion_tokens_details?.rejected_prediction_tokens || null,
-      promptTokenCost: finalPricing.promptTokenCost,
-      completionTokenCost: finalPricing.completionTokenCost,
-      totalCostEur,
-      processingType,
-      success: true,
-      completedAt: new Date(),
-    }
-  });
-}
-
-/**
- * Record failed AI processing request
- */
-async function recordFailedAIProcessingRequest(
-  sessionId: string,
-  processingType: string,
-  errorMessage: string
-): Promise<void> {
-  await prisma.aIProcessingRequest.create({
-    data: {
-      sessionId,
-      model: 'unknown',
-      promptTokens: 0,
-      completionTokens: 0,
-      totalTokens: 0,
-      promptTokenCost: 0,
-      completionTokenCost: 0,
-      totalCostEur: 0,
-      processingType,
-      success: false,
-      errorMessage,
-      completedAt: new Date(),
-    }
-  });
-}
-
-/**
- * Process questions into separate Question and SessionQuestion tables
- */
-async function processQuestions(sessionId: string, questions: string[]): Promise<void> {
-  // Clear existing questions for this session
-  await prisma.sessionQuestion.deleteMany({
-    where: { sessionId }
-  });
-
-  // Process each question
-  for (let index = 0; index < questions.length; index++) {
-    const questionText = questions[index];
-    if (!questionText.trim()) continue; // Skip empty questions
-
-    // Find or create question
-    const question = await prisma.question.upsert({
-      where: { content: questionText.trim() },
-      create: { content: questionText.trim() },
-      update: {}
-    });
-
-    // Link to session
-    await prisma.sessionQuestion.create({
-      data: {
-        sessionId,
-        questionId: question.id,
-        order: index
-      }
-    });
-  }
-}
-
-/**
- * Calculate messagesSent from actual Message records
- */
-async function calculateMessagesSent(sessionId: string): Promise<number> {
-  const userMessageCount = await prisma.message.count({
-    where: {
-      sessionId,
-      role: { in: ['user', 'User'] } // Handle both cases
-    }
-  });
-  return userMessageCount;
-}
-
-/**
- * Calculate endTime from latest Message timestamp
- */
-async function calculateEndTime(sessionId: string, fallbackEndTime: Date): Promise<Date> {
-  const latestMessage = await prisma.message.findFirst({
-    where: { sessionId },
-    orderBy: { timestamp: 'desc' }
-  });
-  return latestMessage?.timestamp || fallbackEndTime;
-}
-
 /**
  * Processes a session transcript using OpenAI API
  */
-async function processTranscriptWithOpenAI(sessionId: string, transcript: string, companyId: string): Promise<ProcessedData> {
+async function processTranscriptWithOpenAI(
+  sessionId: string,
+  transcript: string
+): Promise<ProcessedData> {
   if (!OPENAI_API_KEY) {
     throw new Error("OPENAI_API_KEY environment variable is not set");
   }
 
-  // Get company's AI model
-  const aiModel = await getCompanyAIModel(companyId);
-
-  // Updated system message with exact enum values
+  // Create a system message with instructions
   const systemMessage = `
-You are an AI assistant tasked with analyzing chat transcripts.
-Extract the following information from the transcript and return it in EXACT JSON format:
-
-{
-  "language": "ISO 639-1 code (e.g., 'en', 'nl', 'de')",
-  "sentiment": "POSITIVE|NEUTRAL|NEGATIVE",
-  "escalated": boolean,
-  "forwarded_hr": boolean,
-  "category": "SCHEDULE_HOURS|LEAVE_VACATION|SICK_LEAVE_RECOVERY|SALARY_COMPENSATION|CONTRACT_HOURS|ONBOARDING|OFFBOARDING|WORKWEAR_STAFF_PASS|TEAM_CONTACTS|PERSONAL_QUESTIONS|ACCESS_LOGIN|SOCIAL_QUESTIONS|UNRECOGNIZED_OTHER",
-  "questions": ["question 1", "question 2", ...],
-  "summary": "brief summary (10-300 chars)",
-  "session_id": "${sessionId}"
-}
-
-Rules:
-- language: Primary language used by the user (ISO 639-1 code)
-- sentiment: Overall emotional tone of the conversation
-- escalated: Was the issue escalated to a supervisor/manager?
-- forwarded_hr: Was HR contact mentioned or provided?
-- category: Best fitting category for the main topic (use exact enum values above)
-- questions: Up to 5 paraphrased user questions (in English)
-- summary: Brief conversation summary (10-300 characters)
-
-IMPORTANT: Use EXACT enum values as specified above.
+System: You are a JSON-generating assistant. Your task is to analyze raw chat transcripts between a user and an assistant and return structured data.
+
+⚠️ IMPORTANT:
+- You must return a **single, valid JSON object**.
+- Do **not** include markdown formatting, code fences, explanations, or comments.
+- The JSON must match the exact structure and constraints described below.
+
+Here is the schema you must follow:
+
+{{
+  "language": "ISO 639-1 code, e.g., 'en', 'nl'",
+  "sentiment": "'positive', 'neutral', or 'negative'",
+  "escalated": "bool: true if the assistant connected or referred to a human agent, otherwise false",
+  "forwarded_hr": "bool: true if HR contact info was given, otherwise false",
+  "category": "one of: 'Schedule & Hours', 'Leave & Vacation', 'Sick Leave & Recovery', 'Salary & Compensation', 'Contract & Hours', 'Onboarding', 'Offboarding', 'Workwear & Staff Pass', 'Team & Contacts', 'Personal Questions', 'Access & Login', 'Social questions', 'Unrecognized / Other'",
+  "questions": "a single question or an array of simplified questions asked by the user formulated in English, try to make a question out of messages",
+  "summary": "Brief summary (1-2 sentences) of the conversation",
+  "tokens": "integer, number of tokens used for the API call",
+  "tokens_eur": "float, cost of the API call in EUR",
+}}
+
+You must format your output as a JSON value that adheres to a given "JSON Schema" instance.
+
+"JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.
+
+For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}
+would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings.
+Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
+
+Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!
+
+Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
+{{"type":"object","properties":{"language":{"type":"string","pattern":"^[a-z]{2}$","description":"ISO 639-1 code for the user's primary language"},"sentiment":{"type":"string","enum":["positive","neutral","negative"],"description":"Overall tone of the user during the conversation"},"escalated":{"type":"boolean","description":"Whether the assistant indicated it could not help"},"forwarded_hr":{"type":"boolean","description":"Whether HR contact was mentioned or provided"},"category":{"type":"string","enum":["Schedule & Hours","Leave & Vacation","Sick Leave & Recovery","Salary & Compensation","Contract & Hours","Onboarding","Offboarding","Workwear & Staff Pass","Team & Contacts","Personal Questions","Access & Login","Social questions","Unrecognized / Other"],"description":"Best-fitting topic category for the conversation"},"questions":{"oneOf":[{"type":"string"},{"type":"array","items":{"type":"string"}}],"description":"A single question or a list of paraphrased questions asked by the user in English"},"summary":{"type":"string","minLength":10,"maxLength":300,"description":"Brief summary of the conversation"},"tokens":{"type":"integer","description":"Number of tokens used for the API call"},"tokens_eur":{"type":"number","description":"Cost of the API call in EUR"}},"required":["language","sentiment","escalated","forwarded_hr","category","questions","summary","tokens","tokens_eur"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}
 `;
 
   try {
@@ -269,7 +111,7 @@ async function processTranscriptWithOpenAI(
         Authorization: `Bearer ${OPENAI_API_KEY}`,
       },
       body: JSON.stringify({
-        model: aiModel, // Use company's configured AI model
+        model: "gpt-4-turbo",
         messages: [
           {
             role: "system",
@@ -290,25 +132,14 @@ async function processTranscriptWithOpenAI(
       throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
     }
 
-    const openaiResponse: any = await response.json();
-
-    // Record the AI processing request for cost tracking
-    await recordAIProcessingRequest(sessionId, openaiResponse, 'session_analysis');
-
-    const processedData = JSON.parse(openaiResponse.choices[0].message.content);
+    const data: any = await response.json();
+    const processedData = JSON.parse(data.choices[0].message.content);
 
     // Validate the response against our expected schema
     validateOpenAIResponse(processedData);
 
     return processedData;
   } catch (error) {
-    // Record failed request
-    await recordFailedAIProcessingRequest(
-      sessionId,
-      'session_analysis',
-      error instanceof Error ? error.message : String(error)
-    );
-
     process.stderr.write(`Error processing transcript with OpenAI: ${error}\n`);
     throw error;
   }
@@ -318,9 +149,17 @@ async function processTranscriptWithOpenAI(
  * Validates the OpenAI response against our expected schema
  */
 function validateOpenAIResponse(data: any): void {
+  // Check required fields
   const requiredFields = [
-    "language", "sentiment", "escalated", "forwarded_hr",
-    "category", "questions", "summary", "session_id"
+    "language",
+    "sentiment",
+    "escalated",
+    "forwarded_hr",
+    "category",
+    "questions",
+    "summary",
+    "tokens",
+    "tokens_eur",
   ];
 
   for (const field of requiredFields) {
@@ -329,13 +168,17 @@ function validateOpenAIResponse(data: any): void {
     }
   }
 
-  // Validate field types and values
+  // Validate field types
   if (typeof data.language !== "string" || !/^[a-z]{2}$/.test(data.language)) {
-    throw new Error("Invalid language format. Expected ISO 639-1 code (e.g., 'en')");
+    throw new Error(
+      "Invalid language format. Expected ISO 639-1 code (e.g., 'en')"
+    );
   }
 
-  if (!["POSITIVE", "NEUTRAL", "NEGATIVE"].includes(data.sentiment)) {
-    throw new Error("Invalid sentiment. Expected 'POSITIVE', 'NEUTRAL', or 'NEGATIVE'");
+  if (!["positive", "neutral", "negative"].includes(data.sentiment)) {
+    throw new Error(
+      "Invalid sentiment. Expected 'positive', 'neutral', or 'negative'"
+    );
   }
 
   if (typeof data.escalated !== "boolean") {
@@ -346,27 +189,32 @@ function validateOpenAIResponse(data: any): void {
     throw new Error("Invalid forwarded_hr. Expected boolean");
   }
 
-  const validCategories = [
-    "SCHEDULE_HOURS", "LEAVE_VACATION", "SICK_LEAVE_RECOVERY", "SALARY_COMPENSATION",
-    "CONTRACT_HOURS", "ONBOARDING", "OFFBOARDING", "WORKWEAR_STAFF_PASS",
-    "TEAM_CONTACTS", "PERSONAL_QUESTIONS", "ACCESS_LOGIN", "SOCIAL_QUESTIONS",
-    "UNRECOGNIZED_OTHER"
-  ];
-  if (!validCategories.includes(data.category)) {
-    throw new Error(`Invalid category. Expected one of: ${validCategories.join(", ")}`);
+  if (!VALID_CATEGORIES.includes(data.category)) {
+    throw new Error(
+      `Invalid category. Expected one of: ${VALID_CATEGORIES.join(", ")}`
+    );
   }
 
-  if (!Array.isArray(data.questions)) {
-    throw new Error("Invalid questions. Expected array of strings");
+  if (typeof data.questions !== "string" && !Array.isArray(data.questions)) {
+    throw new Error("Invalid questions. Expected string or array of strings");
   }
 
-  if (typeof data.summary !== "string" || data.summary.length < 10 || data.summary.length > 300) {
-    throw new Error("Invalid summary. Expected string between 10-300 characters");
+  if (
+    typeof data.summary !== "string" ||
+    data.summary.length < 10 ||
+    data.summary.length > 300
+  ) {
+    throw new Error(
+      "Invalid summary. Expected string between 10-300 characters"
+    );
   }
 
-  if (typeof data.session_id !== "string") {
-    throw new Error("Invalid session_id. Expected string");
+  if (typeof data.tokens !== "number" || data.tokens < 0) {
+    throw new Error("Invalid tokens. Expected non-negative number");
+  }
+
+  if (typeof data.tokens_eur !== "number" || data.tokens_eur < 0) {
+    throw new Error("Invalid tokens_eur. Expected non-negative number");
   }
 }
@@ -382,80 +230,92 @@ async function processSingleSession(session: any): Promise<ProcessingResult> {
     };
   }
 
-  try {
-    // Mark AI analysis as started
-    await ProcessingStatusManager.startStage(session.id, ProcessingStage.AI_ANALYSIS);
+  // Check for minimum data quality requirements
+  const userMessages = session.messages.filter((msg: any) =>
+    msg.role.toLowerCase() === 'user' || msg.role.toLowerCase() === 'human'
+  );
+
+  if (userMessages.length === 0) {
+    // Mark as invalid data - no user interaction
+    await prisma.session.update({
+      where: { id: session.id },
+      data: {
+        processed: true,
+        summary: "No user messages found - marked as invalid data",
+      },
+    });
+
+    return {
+      sessionId: session.id,
+      success: true,
+      error: "No user messages - marked as invalid data",
+    };
+  }
 
+  try {
     // Convert messages back to transcript format for OpenAI processing
     const transcript = session.messages
-      .map((msg: any) =>
-        `[${new Date(msg.timestamp)
-          .toLocaleString("en-GB", {
-            day: "2-digit",
-            month: "2-digit",
-            year: "numeric",
-            hour: "2-digit",
-            minute: "2-digit",
-            second: "2-digit",
-          })
-          .replace(",", "")}] ${msg.role}: ${msg.content}`
+      .map(
+        (msg: any) =>
+          `[${new Date(msg.timestamp)
+            .toLocaleString("en-GB", {
+              day: "2-digit",
+              month: "2-digit",
+              year: "numeric",
+              hour: "2-digit",
+              minute: "2-digit",
+              second: "2-digit",
+            })
+            .replace(",", "")}] ${msg.role}: ${msg.content}`
       )
       .join("\n");
 
-    const processedData = await processTranscriptWithOpenAI(session.id, transcript, session.companyId);
-
-    // Calculate messagesSent from actual Message records
-    const messagesSent = await calculateMessagesSent(session.id);
-
-    // Calculate endTime from latest Message timestamp
-    const calculatedEndTime = await calculateEndTime(session.id, session.endTime);
+    const processedData = await processTranscriptWithOpenAI(
+      session.id,
+      transcript
+    );
+
+    // Check if the processed data indicates low quality (empty questions, very short summary, etc.)
+    const hasValidQuestions =
+      processedData.questions &&
+      (Array.isArray(processedData.questions)
+        ? processedData.questions.length > 0
+        : typeof processedData.questions === "string");
+    const hasValidSummary = processedData.summary && processedData.summary.length >= 10;
+    const isValidData = hasValidQuestions && hasValidSummary;
 
     // Update the session with processed data
     await prisma.session.update({
       where: { id: session.id },
       data: {
         language: processedData.language,
-        messagesSent: messagesSent, // Calculated from Messages, not AI
-        endTime: calculatedEndTime, // Use calculated endTime if different
-        sentiment: processedData.sentiment as SentimentCategory,
+        sentiment: processedData.sentiment,
         escalated: processedData.escalated,
         forwardedHr: processedData.forwarded_hr,
-        category: processedData.category as SessionCategory,
+        category: processedData.category,
+        questions: processedData.questions,
         summary: processedData.summary,
+        tokens: {
+          increment: processedData.tokens,
+        },
+        tokensEur: {
+          increment: processedData.tokens_eur,
+        },
+        processed: true,
       },
     });
 
-    // Mark AI analysis as completed
-    await ProcessingStatusManager.completeStage(session.id, ProcessingStage.AI_ANALYSIS, {
-      language: processedData.language,
-      sentiment: processedData.sentiment,
-      category: processedData.category,
-      questionsCount: processedData.questions.length
-    });
-
-    // Start question extraction stage
-    await ProcessingStatusManager.startStage(session.id, ProcessingStage.QUESTION_EXTRACTION);
-
-    // Process questions into separate tables
-    await processQuestions(session.id, processedData.questions);
-
-    // Mark question extraction as completed
-    await ProcessingStatusManager.completeStage(session.id, ProcessingStage.QUESTION_EXTRACTION, {
-      questionsProcessed: processedData.questions.length
-    });
+    if (!isValidData) {
+      process.stdout.write(
+        `[ProcessingScheduler] ⚠️ Session ${session.id} marked as invalid data (empty questions or short summary)\n`
+      );
+    }
 
     return {
       sessionId: session.id,
       success: true,
     };
   } catch (error) {
-    // Mark AI analysis as failed
-    await ProcessingStatusManager.failStage(
-      session.id,
-      ProcessingStage.AI_ANALYSIS,
-      error instanceof Error ? error.message : String(error)
-    );
-
     return {
       sessionId: session.id,
       success: false,
@@ -467,7 +327,10 @@ async function processSingleSession(session: any): Promise<ProcessingResult> {
 /**
  * Process sessions in parallel with concurrency limit
  */
-async function processSessionsInParallel(sessions: any[], maxConcurrency: number = 5): Promise<ProcessingResult[]> {
+async function processSessionsInParallel(
+  sessions: any[],
+  maxConcurrency: number = 5
+): Promise<ProcessingResult[]> {
   const results: Promise<ProcessingResult>[] = [];
   const executing: Promise<ProcessingResult>[] = [];
@@ -486,7 +349,7 @@ async function processSessionsInParallel(
     if (executing.length >= maxConcurrency) {
       await Promise.race(executing);
-      const completedIndex = executing.findIndex(p => p === promise);
+      const completedIndex = executing.findIndex((p) => p === promise);
       if (completedIndex !== -1) {
         executing.splice(completedIndex, 1);
       }
@@ -497,124 +360,116 @@ async function processSessionsInParallel(
 }
 
 /**
- * Process unprocessed sessions using the new processing status system
+ * Process unprocessed sessions in batches until completion
  */
-export async function processUnprocessedSessions(batchSize: number | null = null, maxConcurrency: number = 5): Promise<void> {
-  process.stdout.write("[ProcessingScheduler] Starting to process sessions needing AI analysis...\n");
-
-  // Get sessions that need AI processing using the new status system
-  const sessionsNeedingAI = await ProcessingStatusManager.getSessionsNeedingProcessing(
-    ProcessingStage.AI_ANALYSIS,
-    batchSize || 50
-  );
-
-  if (sessionsNeedingAI.length === 0) {
-    process.stdout.write("[ProcessingScheduler] No sessions found requiring AI processing.\n");
-    return;
-  }
-
-  // Get session IDs that need processing
-  const sessionIds = sessionsNeedingAI.map(statusRecord => statusRecord.sessionId);
-
-  // Fetch full session data with messages
-  const sessionsToProcess = await prisma.session.findMany({
-    where: {
-      id: { in: sessionIds }
-    },
-    include: {
-      messages: {
-        orderBy: { order: "asc" },
-      },
-    },
-  });
-
-  // Filter to only sessions that have messages
-  const sessionsWithMessages = sessionsToProcess.filter(
-    (session: any) => session.messages && session.messages.length > 0
-  );
-
-  if (sessionsWithMessages.length === 0) {
-    process.stdout.write("[ProcessingScheduler] No sessions with messages found requiring processing.\n");
-    return;
-  }
-
+export async function processUnprocessedSessions(
+  batchSize: number = 10,
+  maxConcurrency: number = 5
+): Promise<{ totalProcessed: number; totalFailed: number; totalTime: number }> {
   process.stdout.write(
-    `[ProcessingScheduler] Found ${sessionsWithMessages.length} sessions to process (max concurrency: ${maxConcurrency}).\n`
+    "[ProcessingScheduler] Starting complete processing of all unprocessed sessions...\n"
   );
 
-  const startTime = Date.now();
-  const results = await processSessionsInParallel(sessionsWithMessages, maxConcurrency);
-  const endTime = Date.now();
-
-  const successCount = results.filter((r) => r.success).length;
-  const errorCount = results.filter((r) => !r.success).length;
-
-  process.stdout.write("[ProcessingScheduler] Session processing complete.\n");
-  process.stdout.write(`[ProcessingScheduler] Successfully processed: ${successCount} sessions.\n`);
-  process.stdout.write(`[ProcessingScheduler] Failed to process: ${errorCount} sessions.\n`);
-  process.stdout.write(`[ProcessingScheduler] Total processing time: ${((endTime - startTime) / 1000).toFixed(2)}s\n`);
+  let totalProcessed = 0;
+  let totalFailed = 0;
+  const overallStartTime = Date.now();
+  let batchNumber = 1;
+
+  while (true) {
+    // Find sessions that have messages but haven't been processed
+    const sessionsToProcess = await prisma.session.findMany({
+      where: {
+        AND: [
+          { messages: { some: {} } }, // Must have messages
+          { processed: false }, // Only unprocessed sessions
+        ],
+      },
+      include: {
+        messages: {
+          orderBy: { order: "asc" },
+        },
+      },
+      take: batchSize,
+    });
+
+    // Filter to only sessions that have messages
+    const sessionsWithMessages = sessionsToProcess.filter(
+      (session: any) => session.messages && session.messages.length > 0
+    );
+
+    if (sessionsWithMessages.length === 0) {
+      process.stdout.write(
+        "[ProcessingScheduler] ✅ All sessions with messages have been processed!\n"
+      );
+      break;
+    }
+
+    process.stdout.write(
+      `[ProcessingScheduler] 📦 Batch ${batchNumber}: Processing ${sessionsWithMessages.length} sessions (max concurrency: ${maxConcurrency})...\n`
+    );
+
+    const batchStartTime = Date.now();
+    const results = await processSessionsInParallel(
+      sessionsWithMessages,
+      maxConcurrency
+    );
+    const batchEndTime = Date.now();
+
+    const batchSuccessCount = results.filter((r) => r.success).length;
+    const batchErrorCount = results.filter((r) => !r.success).length;
+    totalProcessed += batchSuccessCount;
+    totalFailed += batchErrorCount;
+
+    process.stdout.write(
+      `[ProcessingScheduler] 📦 Batch ${batchNumber} complete: ${batchSuccessCount} success, ${batchErrorCount} failed (${((batchEndTime - batchStartTime) / 1000).toFixed(2)}s)\n`
+    );
+
+    batchNumber++;
+
+    // Small delay between batches to prevent overwhelming the system
+    if (sessionsWithMessages.length === batchSize) {
+      await new Promise(resolve => setTimeout(resolve, 1000));
+    }
+  }
+
+  const overallEndTime = Date.now();
+  const totalTime = (overallEndTime - overallStartTime) / 1000;
+
+  process.stdout.write("[ProcessingScheduler] 🎉 Complete processing finished!\n");
+  process.stdout.write(
+    `[ProcessingScheduler] 📊 Total results: ${totalProcessed} processed, ${totalFailed} failed\n`
+  );
+  process.stdout.write(
+    `[ProcessingScheduler] ⏱️ Total processing time: ${totalTime.toFixed(2)}s\n`
+  );
+
+  return { totalProcessed, totalFailed, totalTime };
 }
 
 /**
- * Get total AI processing costs for reporting
- */
-export async function getAIProcessingCosts(): Promise<{
-  totalCostEur: number;
-  totalTokens: number;
-  requestCount: number;
-  successfulRequests: number;
-  failedRequests: number;
-}> {
-  const result = await prisma.aIProcessingRequest.aggregate({
-    _sum: {
-      totalCostEur: true,
-      totalTokens: true,
-    },
-    _count: {
-      id: true,
-    },
-  });
-
-  const successfulRequests = await prisma.aIProcessingRequest.count({
-    where: { success: true }
-  });
-
-  const failedRequests = await prisma.aIProcessingRequest.count({
-    where: { success: false }
-  });
-
-  return {
-    totalCostEur: result._sum.totalCostEur || 0,
-    totalTokens: result._sum.totalTokens || 0,
-    requestCount: result._count.id || 0,
-    successfulRequests,
-    failedRequests,
-  };
-}
-
-/**
- * Start the processing scheduler with configurable settings
+ * Start the processing scheduler
  */
 export function startProcessingScheduler(): void {
-  const config = getSchedulerConfig();
-
-  if (!config.enabled) {
-    console.log('[Processing Scheduler] Disabled via configuration');
-    return;
-  }
-
-  console.log(`[Processing Scheduler] Starting with interval: ${config.sessionProcessing.interval}`);
-  console.log(`[Processing Scheduler] Batch size: ${config.sessionProcessing.batchSize === 0 ? 'unlimited' : config.sessionProcessing.batchSize}`);
-  console.log(`[Processing Scheduler] Concurrency: ${config.sessionProcessing.concurrency}`);
-
-  cron.schedule(config.sessionProcessing.interval, async () => {
-    try {
-      await processUnprocessedSessions(
-        config.sessionProcessing.batchSize === 0 ? null : config.sessionProcessing.batchSize,
-        config.sessionProcessing.concurrency
-      );
-    } catch (error) {
-      process.stderr.write(`[ProcessingScheduler] Error in scheduler: ${error}\n`);
-    }
-  });
-}
+  // Note: Scheduler disabled due to Next.js compatibility issues
+  // Use manual triggers via API endpoints instead
+  console.log("Processing scheduler disabled - using manual triggers via API endpoints");
+
+  // Original cron-based implementation commented out due to Next.js compatibility issues
+  // The functionality is now available via the /api/admin/trigger-processing endpoint
+  /*
+  cron.schedule("0 * * * *", async () => {
+    try {
+      await processUnprocessedSessions();
+    } catch (error) {
+      process.stderr.write(
+        `[ProcessingScheduler] Error in scheduler: ${error}\n`
+      );
+    }
+  });
+
+  process.stdout.write(
+    "[ProcessingScheduler] Started processing scheduler (runs hourly).\n"
+  );
+  */
+}

View File

@ -0,0 +1,447 @@
// Session processing without cron dependency - for Next.js API routes
import { PrismaClient } from "@prisma/client";
import fetch from "node-fetch";
import { readFileSync } from "fs";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
import { VALID_CATEGORIES, ValidCategory, SentimentCategory } from "./types";
// Load environment variables from .env.local
const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const envPath = join(__dirname, "..", ".env.local");
try {
const envFile = readFileSync(envPath, "utf8");
const envVars = envFile
.split("\n")
.filter((line) => line.trim() && !line.startsWith("#"));
envVars.forEach((line) => {
const [key, ...valueParts] = line.split("=");
if (key && valueParts.length > 0) {
const value = valueParts.join("=").trim();
if (!process.env[key.trim()]) {
process.env[key.trim()] = value;
}
}
});
} catch (error) {
// Silently fail if .env.local doesn't exist
}
const prisma = new PrismaClient();
const OPENAI_API_KEY = process.env.OPENAI_API_KEY;
const OPENAI_API_URL = "https://api.openai.com/v1/chat/completions";
interface ProcessedData {
language: string;
sentiment: "positive" | "neutral" | "negative";
escalated: boolean;
forwarded_hr: boolean;
category: ValidCategory;
questions: string | string[];
summary: string;
tokens: number;
tokens_eur: number;
}
interface ProcessingResult {
sessionId: string;
success: boolean;
error?: string;
}
/**
* Processes a session transcript using OpenAI API
*/
async function processTranscriptWithOpenAI(
sessionId: string,
transcript: string
): Promise<ProcessedData> {
if (!OPENAI_API_KEY) {
throw new Error("OPENAI_API_KEY environment variable is not set");
}
// Create a system message with instructions
const systemMessage = `
System: You are a JSON-generating assistant. Your task is to analyze raw chat transcripts between a user and an assistant and return structured data.
⚠️ IMPORTANT:
- You must return a **single, valid JSON object**.
- Do **not** include markdown formatting, code fences, explanations, or comments.
- The JSON must match the exact structure and constraints described below.
Here is the schema you must follow:
{
"language": "ISO 639-1 code, e.g., 'en', 'nl'",
"sentiment": "'positive', 'neutral', or 'negative'",
"escalated": "bool: true if the assistant connected or referred to a human agent, otherwise false",
"forwarded_hr": "bool: true if HR contact info was given, otherwise false",
"category": "one of: 'Schedule & Hours', 'Leave & Vacation', 'Sick Leave & Recovery', 'Salary & Compensation', 'Contract & Hours', 'Onboarding', 'Offboarding', 'Workwear & Staff Pass', 'Team & Contacts', 'Personal Questions', 'Access & Login', 'Social questions', 'Unrecognized / Other'",
"questions": "a single question or an array of simplified questions asked by the user formulated in English, try to make a question out of messages",
"summary": "Brief summary (12 sentences) of the conversation",
"tokens": "integer, number of tokens used for the API call",
"tokens_eur": "float, cost of the API call in EUR",
}
You must format your output as a JSON value that adheres to a given "JSON Schema" instance.
"JSON Schema" is a declarative language that allows you to annotate and validate JSON documents.
For example, the example "JSON Schema" instance {{"properties": {{"foo": {{"description": "a list of test words", "type": "array", "items": {{"type": "string"}}}}}}, "required": ["foo"]}}}}
would match an object with one required property, "foo". The "type" property specifies "foo" must be an "array", and the "description" property semantically describes it as "a list of test words". The items within "foo" must be strings.
Thus, the object {{"foo": ["bar", "baz"]}} is a well-formatted instance of this example "JSON Schema". The object {{"properties": {{"foo": ["bar", "baz"]}}}} is not well-formatted.
Your output will be parsed and type-checked according to the provided schema instance, so make sure all fields in your output match the schema exactly and there are no trailing commas!
Here is the JSON Schema instance your output must adhere to. Include the enclosing markdown codeblock:
{{"type":"object","properties":{"language":{"type":"string","pattern":"^[a-z]{2}$","description":"ISO 639-1 code for the user's primary language"},"sentiment":{"type":"string","enum":["positive","neutral","negative"],"description":"Overall tone of the user during the conversation"},"escalated":{"type":"boolean","description":"Whether the assistant indicated it could not help"},"forwarded_hr":{"type":"boolean","description":"Whether HR contact was mentioned or provided"},"category":{"type":"string","enum":["Schedule & Hours","Leave & Vacation","Sick Leave & Recovery","Salary & Compensation","Contract & Hours","Onboarding","Offboarding","Workwear & Staff Pass","Team & Contacts","Personal Questions","Access & Login","Social questions","Unrecognized / Other"],"description":"Best-fitting topic category for the conversation"},"questions":{"oneOf":[{"type":"string"},{"type":"array","items":{"type":"string"}}],"description":"A single question or a list of paraphrased questions asked by the user in English"},"summary":{"type":"string","minLength":10,"maxLength":300,"description":"Brief summary of the conversation"},"tokens":{"type":"integer","description":"Number of tokens used for the API call"},"tokens_eur":{"type":"number","description":"Cost of the API call in EUR"}},"required":["language","sentiment","escalated","forwarded_hr","category","questions","summary","tokens","tokens_eur"],"additionalProperties":false,"$schema":"http://json-schema.org/draft-07/schema#"}}
`;
try {
const response = await fetch(OPENAI_API_URL, {
method: "POST",
headers: {
"Content-Type": "application/json",
Authorization: `Bearer ${OPENAI_API_KEY}`,
},
body: JSON.stringify({
model: "gpt-4-turbo",
messages: [
{
role: "system",
content: systemMessage,
},
{
role: "user",
content: transcript,
},
],
temperature: 0.3, // Lower temperature for more consistent results
response_format: { type: "json_object" },
}),
});
if (!response.ok) {
const errorText = await response.text();
throw new Error(`OpenAI API error: ${response.status} - ${errorText}`);
}
const data: any = await response.json();
const processedData = JSON.parse(data.choices[0].message.content);
// Validate the response against our expected schema
validateOpenAIResponse(processedData);
return processedData;
} catch (error) {
process.stderr.write(`Error processing transcript with OpenAI: ${error}\n`);
throw error;
}
}
/**
* Validates the OpenAI response against our expected schema
*/
function validateOpenAIResponse(data: any): void {
// Check required fields
const requiredFields = [
"language",
"sentiment",
"escalated",
"forwarded_hr",
"category",
"questions",
"summary",
"tokens",
"tokens_eur",
];
for (const field of requiredFields) {
if (!(field in data)) {
throw new Error(`Missing required field: ${field}`);
}
}
// Validate field types
if (typeof data.language !== "string" || !/^[a-z]{2}$/.test(data.language)) {
throw new Error(
"Invalid language format. Expected ISO 639-1 code (e.g., 'en')"
);
}
if (!["positive", "neutral", "negative"].includes(data.sentiment)) {
throw new Error(
"Invalid sentiment. Expected 'positive', 'neutral', or 'negative'"
);
}
if (typeof data.escalated !== "boolean") {
throw new Error("Invalid escalated. Expected boolean");
}
if (typeof data.forwarded_hr !== "boolean") {
throw new Error("Invalid forwarded_hr. Expected boolean");
}
if (!VALID_CATEGORIES.includes(data.category)) {
throw new Error(
`Invalid category. Expected one of: ${VALID_CATEGORIES.join(", ")}`
);
}
if (typeof data.questions !== "string" && !Array.isArray(data.questions)) {
throw new Error("Invalid questions. Expected string or array of strings");
}
if (
typeof data.summary !== "string" ||
data.summary.length < 10 ||
data.summary.length > 300
) {
throw new Error(
"Invalid summary. Expected string between 10-300 characters"
);
}
if (typeof data.tokens !== "number" || data.tokens < 0) {
throw new Error("Invalid tokens. Expected non-negative number");
}
if (typeof data.tokens_eur !== "number" || data.tokens_eur < 0) {
throw new Error("Invalid tokens_eur. Expected non-negative number");
}
}
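// Illustrative sketch (not from the original source): an object of the shape this validator
// accepts. All values are invented, and it is assumed that ValidCategory covers the category
// labels listed in the prompt above.
const exampleProcessedData: ProcessedData = {
  language: "nl",
  sentiment: "neutral",
  escalated: false,
  forwarded_hr: false,
  category: "Leave & Vacation",
  questions: ["How many vacation days do I have left?"],
  summary: "The user asked about remaining vacation days and received an answer.",
  tokens: 1234,
  tokens_eur: 0.012,
};
// validateOpenAIResponse(exampleProcessedData); // would pass every check above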
/**
* Process a single session
*/
async function processSingleSession(session: any): Promise<ProcessingResult> {
if (session.messages.length === 0) {
return {
sessionId: session.id,
success: false,
error: "Session has no messages",
};
}
// Check for minimum data quality requirements
const userMessages = session.messages.filter((msg: any) =>
msg.role.toLowerCase() === 'user' || msg.role.toLowerCase() === 'human'
);
if (userMessages.length === 0) {
// Mark as invalid data - no user interaction
await prisma.session.update({
where: { id: session.id },
data: {
processed: true,
summary: "No user messages found - marked as invalid data",
},
});
return {
sessionId: session.id,
success: true,
error: "No user messages - marked as invalid data",
};
}
try {
// Convert messages back to transcript format for OpenAI processing
const transcript = session.messages
.map(
(msg: any) =>
`[${new Date(msg.timestamp)
.toLocaleString("en-GB", {
day: "2-digit",
month: "2-digit",
year: "numeric",
hour: "2-digit",
minute: "2-digit",
second: "2-digit",
})
.replace(",", "")}] ${msg.role}: ${msg.content}`
)
.join("\n");
const processedData = await processTranscriptWithOpenAI(
session.id,
transcript
);
// Check if the processed data indicates low quality (empty questions, very short summary, etc.)
const hasValidQuestions =
processedData.questions &&
(Array.isArray(processedData.questions)
? processedData.questions.length > 0
: typeof processedData.questions === "string");
const hasValidSummary = processedData.summary && processedData.summary.length >= 10;
const isValidData = hasValidQuestions && hasValidSummary;
// Update the session with processed data
await prisma.session.update({
where: { id: session.id },
data: {
language: processedData.language,
sentiment: processedData.sentiment,
escalated: processedData.escalated,
forwardedHr: processedData.forwarded_hr,
category: processedData.category,
questions: processedData.questions,
summary: processedData.summary,
tokens: {
increment: processedData.tokens,
},
tokensEur: {
increment: processedData.tokens_eur,
},
processed: true,
},
});
if (!isValidData) {
process.stdout.write(
`[ProcessingScheduler] ⚠️ Session ${session.id} marked as invalid data (empty questions or short summary)\n`
);
}
return {
sessionId: session.id,
success: true,
};
} catch (error) {
return {
sessionId: session.id,
success: false,
error: error instanceof Error ? error.message : String(error),
};
}
}
/**
* Process sessions in parallel with concurrency limit
*/
async function processSessionsInParallel(
sessions: any[],
maxConcurrency: number = 5
): Promise<ProcessingResult[]> {
const results: Promise<ProcessingResult>[] = [];
const executing: Promise<ProcessingResult>[] = [];
for (const session of sessions) {
const promise = processSingleSession(session).then((result) => {
process.stdout.write(
result.success
? `[ProcessingScheduler] ✓ Successfully processed session ${result.sessionId}\n`
: `[ProcessingScheduler] ✗ Failed to process session ${result.sessionId}: ${result.error}\n`
);
return result;
});
results.push(promise);
executing.push(promise);
if (executing.length >= maxConcurrency) {
await Promise.race(executing);
const completedIndex = executing.findIndex((p) => p === promise);
if (completedIndex !== -1) {
executing.splice(completedIndex, 1);
}
}
}
return Promise.all(results);
}
/**
* Process unprocessed sessions in batches until completion
*/
export async function processUnprocessedSessions(
batchSize: number = 10,
maxConcurrency: number = 5
): Promise<{ totalProcessed: number; totalFailed: number; totalTime: number }> {
process.stdout.write(
"[ProcessingScheduler] Starting complete processing of all unprocessed sessions...\n"
);
let totalProcessed = 0;
let totalFailed = 0;
const overallStartTime = Date.now();
let batchNumber = 1;
while (true) {
// Find sessions that have messages but haven't been processed
const sessionsToProcess = await prisma.session.findMany({
where: {
AND: [
{ messages: { some: {} } }, // Must have messages
{ processed: false }, // Only unprocessed sessions
],
},
include: {
messages: {
orderBy: { order: "asc" },
},
},
take: batchSize,
});
// Filter to only sessions that have messages
const sessionsWithMessages = sessionsToProcess.filter(
(session: any) => session.messages && session.messages.length > 0
);
if (sessionsWithMessages.length === 0) {
process.stdout.write(
"[ProcessingScheduler] ✅ All sessions with messages have been processed!\n"
);
break;
}
process.stdout.write(
`[ProcessingScheduler] 📦 Batch ${batchNumber}: Processing ${sessionsWithMessages.length} sessions (max concurrency: ${maxConcurrency})...\n`
);
const batchStartTime = Date.now();
const results = await processSessionsInParallel(
sessionsWithMessages,
maxConcurrency
);
const batchEndTime = Date.now();
const batchSuccessCount = results.filter((r) => r.success).length;
const batchErrorCount = results.filter((r) => !r.success).length;
totalProcessed += batchSuccessCount;
totalFailed += batchErrorCount;
process.stdout.write(
`[ProcessingScheduler] 📦 Batch ${batchNumber} complete: ${batchSuccessCount} success, ${batchErrorCount} failed (${((batchEndTime - batchStartTime) / 1000).toFixed(2)}s)\n`
);
batchNumber++;
// Small delay between batches to prevent overwhelming the system
if (sessionsWithMessages.length === batchSize) {
await new Promise(resolve => setTimeout(resolve, 1000));
}
}
const overallEndTime = Date.now();
const totalTime = (overallEndTime - overallStartTime) / 1000;
process.stdout.write("[ProcessingScheduler] 🎉 Complete processing finished!\n");
process.stdout.write(
`[ProcessingScheduler] 📊 Total results: ${totalProcessed} processed, ${totalFailed} failed\n`
);
process.stdout.write(
`[ProcessingScheduler] ⏱️ Total processing time: ${totalTime.toFixed(2)}s\n`
);
return { totalProcessed, totalFailed, totalTime };
}
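For reference, a minimal sketch of how this batch loop could be driven from a one-off script. The module path assumes the file lives at lib/processingSchedulerNoCron.ts (the path used by the import in lib/workflow.ts later in this diff); the script name and location are illustrative, not part of the repo.

// Hypothetical runner, e.g. scripts/process-sessions.ts
import { processUnprocessedSessions } from "../lib/processingSchedulerNoCron";

async function main() {
  // Batch size 10, at most 5 sessions processed in parallel per batch
  const { totalProcessed, totalFailed, totalTime } =
    await processUnprocessedSessions(10, 5);
  console.log(
    `Done: ${totalProcessed} processed, ${totalFailed} failed in ${totalTime.toFixed(2)}s`
  );
}

main().catch((error) => {
  console.error(error);
  process.exit(1);
});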

View File

@ -1,295 +0,0 @@
import { PrismaClient, ProcessingStage, ProcessingStatus } from '@prisma/client';
const prisma = new PrismaClient();
/**
* Centralized processing status management
*/
export class ProcessingStatusManager {
/**
* Initialize processing status for a session with all stages set to PENDING
*/
static async initializeSession(sessionId: string): Promise<void> {
const stages = [
ProcessingStage.CSV_IMPORT,
ProcessingStage.TRANSCRIPT_FETCH,
ProcessingStage.SESSION_CREATION,
ProcessingStage.AI_ANALYSIS,
ProcessingStage.QUESTION_EXTRACTION,
];
// Create all processing status records for this session
await prisma.sessionProcessingStatus.createMany({
data: stages.map(stage => ({
sessionId,
stage,
status: ProcessingStatus.PENDING,
})),
skipDuplicates: true, // In case some already exist
});
}
/**
* Start a processing stage
*/
static async startStage(
sessionId: string,
stage: ProcessingStage,
metadata?: any
): Promise<void> {
await prisma.sessionProcessingStatus.upsert({
where: {
sessionId_stage: { sessionId, stage }
},
update: {
status: ProcessingStatus.IN_PROGRESS,
startedAt: new Date(),
errorMessage: null,
metadata: metadata || null,
},
create: {
sessionId,
stage,
status: ProcessingStatus.IN_PROGRESS,
startedAt: new Date(),
metadata: metadata || null,
},
});
}
/**
* Complete a processing stage successfully
*/
static async completeStage(
sessionId: string,
stage: ProcessingStage,
metadata?: any
): Promise<void> {
await prisma.sessionProcessingStatus.upsert({
where: {
sessionId_stage: { sessionId, stage }
},
update: {
status: ProcessingStatus.COMPLETED,
completedAt: new Date(),
errorMessage: null,
metadata: metadata || null,
},
create: {
sessionId,
stage,
status: ProcessingStatus.COMPLETED,
startedAt: new Date(),
completedAt: new Date(),
metadata: metadata || null,
},
});
}
/**
* Mark a processing stage as failed
*/
static async failStage(
sessionId: string,
stage: ProcessingStage,
errorMessage: string,
metadata?: any
): Promise<void> {
await prisma.sessionProcessingStatus.upsert({
where: {
sessionId_stage: { sessionId, stage }
},
update: {
status: ProcessingStatus.FAILED,
completedAt: new Date(),
errorMessage,
retryCount: { increment: 1 },
metadata: metadata || null,
},
create: {
sessionId,
stage,
status: ProcessingStatus.FAILED,
startedAt: new Date(),
completedAt: new Date(),
errorMessage,
retryCount: 1,
metadata: metadata || null,
},
});
}
/**
* Skip a processing stage (e.g., no transcript URL available)
*/
static async skipStage(
sessionId: string,
stage: ProcessingStage,
reason: string
): Promise<void> {
await prisma.sessionProcessingStatus.upsert({
where: {
sessionId_stage: { sessionId, stage }
},
update: {
status: ProcessingStatus.SKIPPED,
completedAt: new Date(),
errorMessage: reason,
},
create: {
sessionId,
stage,
status: ProcessingStatus.SKIPPED,
startedAt: new Date(),
completedAt: new Date(),
errorMessage: reason,
},
});
}
/**
* Get processing status for a specific session
*/
static async getSessionStatus(sessionId: string) {
return await prisma.sessionProcessingStatus.findMany({
where: { sessionId },
orderBy: { stage: 'asc' },
});
}
/**
* Get sessions that need processing for a specific stage
*/
static async getSessionsNeedingProcessing(
stage: ProcessingStage,
limit: number = 50
) {
return await prisma.sessionProcessingStatus.findMany({
where: {
stage,
status: ProcessingStatus.PENDING,
},
include: {
session: {
include: {
import: true,
company: true,
},
},
},
take: limit,
orderBy: { session: { createdAt: 'asc' } },
});
}
/**
* Get pipeline status overview
*/
static async getPipelineStatus() {
// Get counts by stage and status
const statusCounts = await prisma.sessionProcessingStatus.groupBy({
by: ['stage', 'status'],
_count: { id: true },
});
// Get total sessions
const totalSessions = await prisma.session.count();
// Organize the data
const pipeline: Record<string, Record<string, number>> = {};
for (const { stage, status, _count } of statusCounts) {
if (!pipeline[stage]) {
pipeline[stage] = {};
}
pipeline[stage][status] = _count.id;
}
return {
totalSessions,
pipeline,
};
}
/**
* Get sessions with failed processing
*/
static async getFailedSessions(stage?: ProcessingStage) {
const where: any = {
status: ProcessingStatus.FAILED,
};
if (stage) {
where.stage = stage;
}
return await prisma.sessionProcessingStatus.findMany({
where,
include: {
session: {
include: {
import: true,
},
},
},
orderBy: { completedAt: 'desc' },
});
}
/**
* Reset a failed stage for retry
*/
static async resetStageForRetry(sessionId: string, stage: ProcessingStage): Promise<void> {
await prisma.sessionProcessingStatus.update({
where: {
sessionId_stage: { sessionId, stage }
},
data: {
status: ProcessingStatus.PENDING,
startedAt: null,
completedAt: null,
errorMessage: null,
},
});
}
/**
* Check if a session has completed a specific stage
*/
static async hasCompletedStage(sessionId: string, stage: ProcessingStage): Promise<boolean> {
const status = await prisma.sessionProcessingStatus.findUnique({
where: {
sessionId_stage: { sessionId, stage }
},
});
return status?.status === ProcessingStatus.COMPLETED;
}
/**
* Check if a session is ready for a specific stage (previous stages completed)
*/
static async isReadyForStage(sessionId: string, stage: ProcessingStage): Promise<boolean> {
const stageOrder = [
ProcessingStage.CSV_IMPORT,
ProcessingStage.TRANSCRIPT_FETCH,
ProcessingStage.SESSION_CREATION,
ProcessingStage.AI_ANALYSIS,
ProcessingStage.QUESTION_EXTRACTION,
];
const currentStageIndex = stageOrder.indexOf(stage);
if (currentStageIndex === 0) return true; // First stage is always ready
// Check if all previous stages are completed
const previousStages = stageOrder.slice(0, currentStageIndex);
for (const prevStage of previousStages) {
const isCompleted = await this.hasCompletedStage(sessionId, prevStage);
if (!isCompleted) return false;
}
return true;
}
}
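For reference, a minimal sketch of how one pipeline step might drive this class. The import path matches the one used by the migration script later in this diff (run from the repo root); the runAiAnalysis wrapper and the analyse callback are placeholders, not code from the repo.

import { ProcessingStage } from "@prisma/client";
import { ProcessingStatusManager } from "./lib/processingStatusManager";

// Hypothetical wrapper that records the status lifecycle around one stage
async function runAiAnalysis(sessionId: string, analyse: () => Promise<void>) {
  if (!(await ProcessingStatusManager.isReadyForStage(sessionId, ProcessingStage.AI_ANALYSIS))) {
    return; // earlier stages have not completed yet
  }
  await ProcessingStatusManager.startStage(sessionId, ProcessingStage.AI_ANALYSIS);
  try {
    await analyse();
    await ProcessingStatusManager.completeStage(sessionId, ProcessingStage.AI_ANALYSIS);
  } catch (error) {
    await ProcessingStatusManager.failStage(
      sessionId,
      ProcessingStage.AI_ANALYSIS,
      error instanceof Error ? error.message : String(error)
    );
  }
}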

View File

@ -1,96 +1,21 @@
-// CSV import scheduler with configurable intervals
-import cron from "node-cron";
+// node-cron job to auto-refresh session data every 15 mins
+// Note: Disabled due to Next.js compatibility issues
+// import cron from "node-cron";
import { prisma } from "./prisma";
import { fetchAndParseCsv } from "./csvFetcher";
-import { getSchedulerConfig } from "./schedulerConfig";
-
-export function startCsvImportScheduler() {
-  const config = getSchedulerConfig();
-
-  if (!config.enabled) {
-    console.log('[CSV Import Scheduler] Disabled via configuration');
-    return;
-  }
-
-  console.log(`[CSV Import Scheduler] Starting with interval: ${config.csvImport.interval}`);
-
-  cron.schedule(config.csvImport.interval, async () => {
-    const companies = await prisma.company.findMany();
-    for (const company of companies) {
-      try {
-        const rawSessionData = await fetchAndParseCsv(
-          company.csvUrl,
-          company.csvUsername as string | undefined,
-          company.csvPassword as string | undefined
-        );
-        // Create SessionImport records for new data
-        for (const rawSession of rawSessionData) {
-          try {
-            // Use upsert to handle duplicates gracefully
-            await prisma.sessionImport.upsert({
-              where: {
-                companyId_externalSessionId: {
-                  companyId: company.id,
-                  externalSessionId: rawSession.externalSessionId,
-                },
-              },
-              update: {
-                // Update existing record with latest data
-                startTimeRaw: rawSession.startTimeRaw,
-                endTimeRaw: rawSession.endTimeRaw,
-                ipAddress: rawSession.ipAddress,
-                countryCode: rawSession.countryCode,
-                language: rawSession.language,
-                messagesSent: rawSession.messagesSent,
-                sentimentRaw: rawSession.sentimentRaw,
-                escalatedRaw: rawSession.escalatedRaw,
-                forwardedHrRaw: rawSession.forwardedHrRaw,
-                fullTranscriptUrl: rawSession.fullTranscriptUrl,
-                avgResponseTimeSeconds: rawSession.avgResponseTimeSeconds,
-                tokens: rawSession.tokens,
-                tokensEur: rawSession.tokensEur,
-                category: rawSession.category,
-                initialMessage: rawSession.initialMessage,
-                // Status tracking now handled by ProcessingStatusManager
-              },
-              create: {
-                companyId: company.id,
-                externalSessionId: rawSession.externalSessionId,
-                startTimeRaw: rawSession.startTimeRaw,
-                endTimeRaw: rawSession.endTimeRaw,
-                ipAddress: rawSession.ipAddress,
-                countryCode: rawSession.countryCode,
-                language: rawSession.language,
-                messagesSent: rawSession.messagesSent,
-                sentimentRaw: rawSession.sentimentRaw,
-                escalatedRaw: rawSession.escalatedRaw,
-                forwardedHrRaw: rawSession.forwardedHrRaw,
-                fullTranscriptUrl: rawSession.fullTranscriptUrl,
-                avgResponseTimeSeconds: rawSession.avgResponseTimeSeconds,
-                tokens: rawSession.tokens,
-                tokensEur: rawSession.tokensEur,
-                category: rawSession.category,
-                initialMessage: rawSession.initialMessage,
-                // Status tracking now handled by ProcessingStatusManager
-              },
-            });
-          } catch (error) {
-            // Log individual session import errors but continue processing
-            process.stderr.write(
-              `[Scheduler] Failed to import session ${rawSession.externalSessionId} for company ${company.name}: ${error}\n`
-            );
-          }
-        }
-        process.stdout.write(
-          `[Scheduler] Imported ${rawSessionData.length} session records for company: ${company.name}\n`
-        );
-      } catch (e) {
-        process.stderr.write(
-          `[Scheduler] Failed to fetch CSV for company: ${company.name} - ${e}\n`
-        );
-      }
-    }
-  });
+
+interface SessionCreateData {
+  id: string;
+  startTime: Date;
+  companyId: string;
+  [key: string]: unknown;
+}
+
+export function startScheduler() {
+  // Note: Scheduler disabled due to Next.js compatibility issues
+  // Use manual triggers via API endpoints instead
+  console.log("Session refresh scheduler disabled - using manual triggers via API endpoints");
+
+  // Original cron-based implementation commented out due to Next.js compatibility issues
+  // The functionality is now available via the /api/admin/refresh-sessions endpoint
}

View File

@ -1,44 +0,0 @@
// Legacy scheduler configuration - now uses centralized env management
// This file is kept for backward compatibility but delegates to lib/env.ts
import { getSchedulerConfig as getEnvSchedulerConfig, logEnvConfig } from "./env";
export interface SchedulerConfig {
enabled: boolean;
csvImport: {
interval: string;
};
sessionProcessing: {
interval: string;
batchSize: number; // 0 = unlimited
concurrency: number;
};
}
/**
* Get scheduler configuration from environment variables
* @deprecated Use getSchedulerConfig from lib/env.ts instead
*/
export function getSchedulerConfig(): SchedulerConfig {
const config = getEnvSchedulerConfig();
return {
enabled: config.enabled,
csvImport: {
interval: config.csvImport.interval,
},
sessionProcessing: {
interval: config.sessionProcessing.interval,
batchSize: config.sessionProcessing.batchSize,
concurrency: config.sessionProcessing.concurrency,
},
};
}
/**
* Log scheduler configuration
* @deprecated Use logEnvConfig from lib/env.ts instead
*/
export function logSchedulerConfig(config: SchedulerConfig): void {
logEnvConfig();
}

View File

@ -1,18 +1,17 @@
// Combined scheduler initialization
-import { startCsvImportScheduler } from "./scheduler";
-import { startProcessingScheduler } from "./processingScheduler";
+// Note: Removed cron-based scheduler imports to avoid Next.js compatibility issues
+// import { startScheduler } from "./scheduler";
+// import { startProcessingScheduler } from "./processingScheduler";
/**
 * Initialize all schedulers
- * - CSV import scheduler (runs every 15 minutes)
+ * - Session refresh scheduler (runs every 15 minutes)
 * - Session processing scheduler (runs every hour)
 */
export function initializeSchedulers() {
-  // Start the CSV import scheduler
-  startCsvImportScheduler();
-
-  // Start the session processing scheduler
-  startProcessingScheduler();
-  console.log("All schedulers initialized successfully");
+  // Note: All schedulers disabled due to Next.js compatibility issues
+  // Use manual triggers via API endpoints instead
+  console.log("Schedulers disabled - using manual triggers via API endpoints");
+  // startScheduler();
+  // startProcessingScheduler();
}

98
lib/session-service.ts Normal file
View File

@ -0,0 +1,98 @@
import { prisma } from "./prisma";
import { fetchAndParseCsv } from "./csvFetcher";
import { triggerCompleteWorkflow } from "./workflow";
interface SessionCreateData {
id: string;
startTime: Date;
companyId: string;
sessionId?: string;
[key: string]: unknown;
}
export async function processSessions(company: any) {
const sessions = await fetchAndParseCsv(
company.csvUrl,
company.csvUsername as string | undefined,
company.csvPassword as string | undefined
);
for (const session of sessions) {
const sessionData: SessionCreateData = {
...session,
companyId: company.id,
id:
session.id ||
session.sessionId ||
`sess_${Date.now()}_${Math.random().toString(36).substring(2, 7)}`,
// Ensure startTime is not undefined
startTime: session.startTime || new Date(),
};
// Validate dates to prevent "Invalid Date" errors
const startTime =
sessionData.startTime instanceof Date &&
!isNaN(sessionData.startTime.getTime())
? sessionData.startTime
: new Date();
const endTime =
session.endTime instanceof Date && !isNaN(session.endTime.getTime())
? session.endTime
: new Date();
// Check if the session already exists
const existingSession = await prisma.session.findUnique({
where: { id: sessionData.id },
});
if (existingSession) {
// Skip this session as it already exists
continue;
}
// Only include fields that are properly typed for Prisma
await prisma.session.create({
data: {
id: sessionData.id,
companyId: sessionData.companyId,
startTime: startTime,
endTime: endTime,
ipAddress: session.ipAddress || null,
country: session.country || null,
language: session.language || null,
messagesSent:
typeof session.messagesSent === "number" ? session.messagesSent : 0,
sentiment:
typeof session.sentiment === "number" ? session.sentiment : null,
escalated:
typeof session.escalated === "boolean" ? session.escalated : null,
forwardedHr:
typeof session.forwardedHr === "boolean"
? session.forwardedHr
: null,
fullTranscriptUrl: session.fullTranscriptUrl || null,
avgResponseTime:
typeof session.avgResponseTime === "number"
? session.avgResponseTime
: null,
tokens: typeof session.tokens === "number" ? session.tokens : null,
tokensEur:
typeof session.tokensEur === "number" ? session.tokensEur : null,
category: session.category || null,
initialMsg: session.initialMsg || null,
},
});
}
// After importing sessions, automatically trigger complete workflow (fetch transcripts + process)
// This runs in the background without blocking the response
triggerCompleteWorkflow()
.then((result) => {
console.log(`[Refresh Sessions] Complete workflow finished: ${result.message}`);
})
.catch((error) => {
console.error(`[Refresh Sessions] Complete workflow failed:`, error);
});
return sessions.length;
}
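One possible way to drive this from a manual refresh trigger, sketched under the assumption that it is called from an /api/admin/refresh-sessions style handler; the wrapper below is illustrative and not code from the repo.

import { prisma } from "./prisma";
import { processSessions } from "./session-service";

// Hypothetical helper: import CSV sessions for every company and return the total row count
export async function refreshAllCompanies(): Promise<number> {
  const companies = await prisma.company.findMany();
  let imported = 0;
  for (const company of companies) {
    imported += await processSessions(company);
  }
  return imported;
}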

View File

@ -1,151 +0,0 @@
// Transcript fetching utility
import fetch from "node-fetch";
export interface TranscriptFetchResult {
success: boolean;
content?: string;
error?: string;
}
/**
* Fetch transcript content from a URL
* @param url The transcript URL
* @param username Optional username for authentication
* @param password Optional password for authentication
* @returns Promise with fetch result
*/
export async function fetchTranscriptContent(
url: string,
username?: string,
password?: string
): Promise<TranscriptFetchResult> {
try {
if (!url || !url.trim()) {
return {
success: false,
error: 'No transcript URL provided',
};
}
// Prepare authentication header if credentials provided
const authHeader =
username && password
? "Basic " + Buffer.from(`${username}:${password}`).toString("base64")
: undefined;
const headers: Record<string, string> = {
'User-Agent': 'LiveDash-Transcript-Fetcher/1.0',
};
if (authHeader) {
headers.Authorization = authHeader;
}
// Fetch the transcript with timeout
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 30000); // 30 second timeout
const response = await fetch(url, {
method: 'GET',
headers,
signal: controller.signal,
});
clearTimeout(timeoutId);
if (!response.ok) {
return {
success: false,
error: `HTTP ${response.status}: ${response.statusText}`,
};
}
const content = await response.text();
if (!content || content.trim().length === 0) {
return {
success: false,
error: 'Empty transcript content',
};
}
return {
success: true,
content: content.trim(),
};
} catch (error) {
const errorMessage = error instanceof Error ? error.message : String(error);
// Handle common network errors
if (errorMessage.includes('ENOTFOUND')) {
return {
success: false,
error: 'Domain not found',
};
}
if (errorMessage.includes('ECONNREFUSED')) {
return {
success: false,
error: 'Connection refused',
};
}
if (errorMessage.includes('timeout')) {
return {
success: false,
error: 'Request timeout',
};
}
return {
success: false,
error: errorMessage,
};
}
}
/**
* Validate if a URL looks like a valid transcript URL
* @param url The URL to validate
* @returns boolean indicating if URL appears valid
*/
export function isValidTranscriptUrl(url: string): boolean {
if (!url || typeof url !== 'string') {
return false;
}
try {
const parsedUrl = new URL(url);
return parsedUrl.protocol === 'http:' || parsedUrl.protocol === 'https:';
} catch {
return false;
}
}
/**
* Extract session ID from transcript content if possible
* This is a helper function that can be enhanced based on transcript format
* @param content The transcript content
* @returns Extracted session ID or null
*/
export function extractSessionIdFromTranscript(content: string): string | null {
if (!content) return null;
// Look for common session ID patterns
const patterns = [
/session[_-]?id[:\s]*([a-zA-Z0-9-]+)/i,
/id[:\s]*([a-zA-Z0-9-]{8,})/i,
/^([a-zA-Z0-9-]{8,})/m, // First line might be session ID
];
for (const pattern of patterns) {
const match = content.match(pattern);
if (match && match[1]) {
return match[1].trim();
}
}
return null;
}
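For reference, a small sketch combining the two exported helpers above. The module path ./transcriptFetcher is an assumption (the original filename is not shown in this hunk), and the loadTranscript wrapper is illustrative.

import { fetchTranscriptContent, isValidTranscriptUrl } from "./transcriptFetcher";

// Hypothetical helper: only fetch when the URL looks usable, and flatten the result to a string
async function loadTranscript(
  url: string,
  username?: string,
  password?: string
): Promise<string | null> {
  if (!isValidTranscriptUrl(url)) {
    return null;
  }
  const result = await fetchTranscriptContent(url, username, password);
  return result.success ? result.content ?? null : null;
}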

263
lib/transcriptParser.js Normal file
View File

@ -0,0 +1,263 @@
// Transcript parser utility - converts raw transcript text to structured messages
import { PrismaClient } from "@prisma/client";
const prisma = new PrismaClient();
/**
* Parses chat log string to JSON format with individual messages
* @param {string} logString - Raw transcript content
* @returns {Object} Parsed data with messages array and metadata
*/
export function parseChatLogToJSON(logString) {
// Convert to string if it's not already
const stringData =
typeof logString === "string" ? logString : String(logString);
// Split by lines and filter out empty lines
const lines = stringData.split("\n").filter((line) => line.trim() !== "");
const messages = [];
let currentMessage = null;
for (const line of lines) {
// Check if line starts with a timestamp pattern [DD.MM.YYYY HH:MM:SS]
const timestampMatch = line.match(
/^\[(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}:\d{2})\] (.+?): (.*)$/
);
if (timestampMatch) {
// If we have a previous message, push it to the array
if (currentMessage) {
messages.push(currentMessage);
}
// Parse the timestamp
const [, timestamp, sender, content] = timestampMatch;
// Convert DD.MM.YYYY HH:MM:SS to ISO format
const [datePart, timePart] = timestamp.split(" ");
const [day, month, year] = datePart.split(".");
const [hour, minute, second] = timePart.split(":");
const dateObject = new Date(year, month - 1, day, hour, minute, second);
// Create new message object
currentMessage = {
timestamp: dateObject.toISOString(),
role: sender,
content: content,
};
} else if (currentMessage) {
// This is a continuation of the previous message (multiline)
currentMessage.content += "\n" + line;
}
}
// Don't forget the last message
if (currentMessage) {
messages.push(currentMessage);
}
return {
messages: messages.sort((a, b) => {
// First sort by timestamp (ascending)
const timeComparison = new Date(a.timestamp) - new Date(b.timestamp);
if (timeComparison !== 0) {
return timeComparison;
}
// If timestamps are equal, sort by role (descending)
// This puts "User" before "Assistant" when timestamps are the same
return b.role.localeCompare(a.role);
}),
totalMessages: messages.length,
};
}
/**
* Stores parsed messages in the database for a session
* @param {string} sessionId - The session ID
* @param {Array} messages - Array of parsed message objects
*/
export async function storeMessagesForSession(sessionId, messages) {
try {
// First, delete any existing messages for this session
await prisma.message.deleteMany({
where: { sessionId },
});
// Then insert the new messages
const messageData = messages.map((message, index) => ({
sessionId,
timestamp: new Date(message.timestamp),
role: message.role,
content: message.content,
order: index,
}));
if (messageData.length > 0) {
await prisma.message.createMany({
data: messageData,
});
// Extract actual end time from the latest message
const latestMessage = messages.reduce((latest, current) => {
return new Date(current.timestamp) > new Date(latest.timestamp)
? current
: latest;
});
// Update the session's endTime with the actual conversation end time
await prisma.session.update({
where: { id: sessionId },
data: {
endTime: new Date(latestMessage.timestamp),
},
});
process.stdout.write(
`[TranscriptParser] Updated session ${sessionId} endTime to ${latestMessage.timestamp}\n`
);
}
process.stdout.write(
`[TranscriptParser] Stored ${messageData.length} messages for session ${sessionId}\n`
);
return messageData.length;
} catch (error) {
process.stderr.write(
`[TranscriptParser] Error storing messages for session ${sessionId}: ${error}\n`
);
throw error;
}
}
/**
* Processes and stores transcript for a single session
* @param {string} sessionId - The session ID
* @param {string} transcriptContent - Raw transcript content
* @returns {Promise<Object>} Processing result with message count
*/
export async function processTranscriptForSession(
sessionId,
transcriptContent
) {
if (!transcriptContent || transcriptContent.trim() === "") {
throw new Error("No transcript content provided");
}
try {
// Parse the transcript
const parsed = parseChatLogToJSON(transcriptContent);
// Store messages in database
const messageCount = await storeMessagesForSession(
sessionId,
parsed.messages
);
return {
sessionId,
messageCount,
totalMessages: parsed.totalMessages,
success: true,
};
} catch (error) {
process.stderr.write(
`[TranscriptParser] Error processing transcript for session ${sessionId}: ${error}\n`
);
throw error;
}
}
/**
* Processes transcripts for all sessions that have transcript content but no parsed messages
*/
export async function processAllUnparsedTranscripts() {
process.stdout.write(
"[TranscriptParser] Starting to process unparsed transcripts...\n"
);
try {
// Find sessions with transcript content but no messages
const sessionsToProcess = await prisma.session.findMany({
where: {
AND: [
{ transcriptContent: { not: null } },
{ transcriptContent: { not: "" } },
],
},
include: {
messages: true,
},
});
// Filter to only sessions without messages
const unparsedSessions = sessionsToProcess.filter(
(session) => session.messages.length === 0
);
if (unparsedSessions.length === 0) {
process.stdout.write(
"[TranscriptParser] No unparsed transcripts found.\n"
);
return { processed: 0, errors: 0 };
}
process.stdout.write(
`[TranscriptParser] Found ${unparsedSessions.length} sessions with unparsed transcripts.\n`
);
let successCount = 0;
let errorCount = 0;
for (const session of unparsedSessions) {
try {
const result = await processTranscriptForSession(
session.id,
session.transcriptContent
);
process.stdout.write(
`[TranscriptParser] Processed session ${session.id}: ${result.messageCount} messages\n`
);
successCount++;
} catch (error) {
process.stderr.write(
`[TranscriptParser] Failed to process session ${session.id}: ${error}\n`
);
errorCount++;
}
}
process.stdout.write(
`[TranscriptParser] Completed processing. Success: ${successCount}, Errors: ${errorCount}\n`
);
return { processed: successCount, errors: errorCount };
} catch (error) {
process.stderr.write(
`[TranscriptParser] Error in processAllUnparsedTranscripts: ${error}\n`
);
throw error;
}
}
/**
* Gets parsed messages for a session
* @param {string} sessionId - The session ID
* @returns {Promise<Array>} Array of message objects
*/
export async function getMessagesForSession(sessionId) {
try {
const messages = await prisma.message.findMany({
where: { sessionId },
orderBy: { order: "asc" },
});
return messages;
} catch (error) {
process.stderr.write(
`[TranscriptParser] Error getting messages for session ${sessionId}: ${error}\n`
);
throw error;
}
}
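A minimal sketch of the parser's expected input, timestamped lines of the form [DD.MM.YYYY HH:MM:SS] Sender: text; the sample transcript below is invented for illustration only.

import { parseChatLogToJSON } from "./transcriptParser";

const raw = [
  "[26.06.2025 21:00:01] User: Hi, how many vacation days do I have left?",
  "[26.06.2025 21:00:05] Assistant: You currently have 12 vacation days remaining.",
].join("\n");

const { messages, totalMessages } = parseChatLogToJSON(raw);
console.log(totalMessages); // 2
console.log(messages[0].role); // "User"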

View File

@ -1,360 +0,0 @@
// Transcript parsing utility for converting raw transcript content into structured messages
import { prisma } from './prisma.js';
export interface ParsedMessage {
sessionId: string;
timestamp: Date;
role: string;
content: string;
order: number;
}
export interface TranscriptParseResult {
success: boolean;
messages?: ParsedMessage[];
error?: string;
}
/**
* Parse European date format (DD.MM.YYYY HH:mm:ss) to Date object
*/
function parseEuropeanDate(dateStr: string): Date {
const match = dateStr.match(/(\d{2})\.(\d{2})\.(\d{4}) (\d{2}):(\d{2}):(\d{2})/);
if (!match) {
throw new Error(`Invalid date format: ${dateStr}`);
}
const [, day, month, year, hour, minute, second] = match;
return new Date(
parseInt(year, 10),
parseInt(month, 10) - 1, // JavaScript months are 0-indexed
parseInt(day, 10),
parseInt(hour, 10),
parseInt(minute, 10),
parseInt(second, 10)
);
}
/**
* Parse raw transcript content into structured messages
* @param content Raw transcript content
* @param startTime Session start time
* @param endTime Session end time
* @returns Parsed messages with timestamps
*/
export function parseTranscriptToMessages(
content: string,
startTime: Date,
endTime: Date
): TranscriptParseResult {
try {
if (!content || !content.trim()) {
return {
success: false,
error: 'Empty transcript content'
};
}
const messages: ParsedMessage[] = [];
const lines = content.split('\n');
let currentMessage: { role: string; content: string; timestamp?: string } | null = null;
let order = 0;
for (const line of lines) {
const trimmedLine = line.trim();
// Skip empty lines
if (!trimmedLine) {
continue;
}
// Check if line starts with a timestamp and role [DD.MM.YYYY HH:MM:SS] Role: content
const timestampRoleMatch = trimmedLine.match(/^\[(\d{2}\.\d{2}\.\d{4} \d{2}:\d{2}:\d{2})\]\s+(User|Assistant|System|user|assistant|system):\s*(.*)$/i);
// Check if line starts with just a role (User:, Assistant:, System:, etc.)
const roleMatch = trimmedLine.match(/^(User|Assistant|System|user|assistant|system):\s*(.*)$/i);
if (timestampRoleMatch) {
// Save previous message if exists
if (currentMessage) {
messages.push({
sessionId: '', // Will be set by caller
timestamp: new Date(), // Will be calculated below
role: currentMessage.role,
content: currentMessage.content.trim(),
order: order++
});
}
// Start new message with timestamp
const timestamp = timestampRoleMatch[1];
const role = timestampRoleMatch[2].charAt(0).toUpperCase() + timestampRoleMatch[2].slice(1).toLowerCase();
const content = timestampRoleMatch[3] || '';
currentMessage = {
role,
content,
timestamp // Store the timestamp for later parsing
};
} else if (roleMatch) {
// Save previous message if exists
if (currentMessage) {
messages.push({
sessionId: '', // Will be set by caller
timestamp: new Date(), // Will be calculated below
role: currentMessage.role,
content: currentMessage.content.trim(),
order: order++
});
}
// Start new message without timestamp
const role = roleMatch[1].charAt(0).toUpperCase() + roleMatch[1].slice(1).toLowerCase();
const content = roleMatch[2] || '';
currentMessage = {
role,
content
};
} else if (currentMessage) {
// Continue previous message (multi-line)
currentMessage.content += '\n' + trimmedLine;
}
// If no current message and no role match, skip the line (orphaned content)
}
// Save the last message
if (currentMessage) {
messages.push({
sessionId: '', // Will be set by caller
timestamp: new Date(), // Will be calculated below
role: currentMessage.role,
content: currentMessage.content.trim(),
order: order++
});
}
if (messages.length === 0) {
return {
success: false,
error: 'No messages found in transcript'
};
}
// Calculate timestamps - use parsed timestamps if available, otherwise distribute across session duration
const hasTimestamps = messages.some(msg => (msg as any).timestamp);
if (hasTimestamps) {
// Use parsed timestamps from the transcript
messages.forEach((message, index) => {
const msgWithTimestamp = message as any;
if (msgWithTimestamp.timestamp) {
try {
message.timestamp = parseEuropeanDate(msgWithTimestamp.timestamp);
} catch (error) {
// Fallback to distributed timestamp if parsing fails
const sessionDurationMs = endTime.getTime() - startTime.getTime();
const messageInterval = messages.length > 1 ? sessionDurationMs / (messages.length - 1) : 0;
message.timestamp = new Date(startTime.getTime() + (index * messageInterval));
}
} else {
// Fallback to distributed timestamp
const sessionDurationMs = endTime.getTime() - startTime.getTime();
const messageInterval = messages.length > 1 ? sessionDurationMs / (messages.length - 1) : 0;
message.timestamp = new Date(startTime.getTime() + (index * messageInterval));
}
});
} else {
// Distribute messages across session duration
const sessionDurationMs = endTime.getTime() - startTime.getTime();
const messageInterval = messages.length > 1 ? sessionDurationMs / (messages.length - 1) : 0;
messages.forEach((message, index) => {
message.timestamp = new Date(startTime.getTime() + (index * messageInterval));
});
}
return {
success: true,
messages
};
} catch (error) {
return {
success: false,
error: error instanceof Error ? error.message : String(error)
};
}
}
/**
* Store parsed messages in the database for a session
* @param sessionId The session ID
* @param messages Array of parsed messages
*/
export async function storeMessagesForSession(
sessionId: string,
messages: ParsedMessage[]
): Promise<void> {
// Delete existing messages for this session (in case of re-processing)
await prisma.message.deleteMany({
where: { sessionId }
});
// Create new messages
const messagesWithSessionId = messages.map(msg => ({
...msg,
sessionId
}));
await prisma.message.createMany({
data: messagesWithSessionId
});
}
/**
* Process transcript for a single session
* @param sessionId The session ID to process
*/
export async function processSessionTranscript(sessionId: string): Promise<void> {
// Get the session and its import data
const session = await prisma.session.findUnique({
where: { id: sessionId },
include: {
import: true
}
});
if (!session) {
throw new Error(`Session not found: ${sessionId}`);
}
if (!session.import) {
throw new Error(`No import data found for session: ${sessionId}`);
}
if (!session.import.rawTranscriptContent) {
throw new Error(`No transcript content found for session: ${sessionId}`);
}
// Parse the start and end times
const startTime = parseEuropeanDate(session.import.startTimeRaw);
const endTime = parseEuropeanDate(session.import.endTimeRaw);
// Parse the transcript
const parseResult = parseTranscriptToMessages(
session.import.rawTranscriptContent,
startTime,
endTime
);
if (!parseResult.success) {
throw new Error(`Failed to parse transcript: ${parseResult.error}`);
}
// Store the messages
await storeMessagesForSession(sessionId, parseResult.messages!);
console.log(`✅ Processed ${parseResult.messages!.length} messages for session ${sessionId}`);
}
/**
* Process all sessions that have transcript content but no messages
*/
export async function processAllUnparsedTranscripts(): Promise<void> {
console.log('🔍 Finding sessions with unparsed transcripts...');
// Find sessions that have transcript content but no messages
const sessionsToProcess = await prisma.session.findMany({
where: {
import: {
rawTranscriptContent: {
not: null
}
},
messages: {
none: {}
}
},
include: {
import: true,
_count: {
select: {
messages: true
}
}
}
});
console.log(`📋 Found ${sessionsToProcess.length} sessions to process`);
let processed = 0;
let errors = 0;
for (const session of sessionsToProcess) {
try {
await processSessionTranscript(session.id);
processed++;
} catch (error) {
console.error(`❌ Error processing session ${session.id}:`, error);
errors++;
}
}
console.log(`\n📊 Processing complete:`);
console.log(` ✅ Successfully processed: ${processed} sessions`);
console.log(` ❌ Errors: ${errors} sessions`);
console.log(` 📝 Total messages created: ${await getTotalMessageCount()}`);
}
/**
* Get total count of messages in the database
*/
export async function getTotalMessageCount(): Promise<number> {
const result = await prisma.message.count();
return result;
}
/**
* Get messages for a specific session
* @param sessionId The session ID
* @returns Array of messages ordered by order field
*/
export async function getMessagesForSession(sessionId: string) {
return await prisma.message.findMany({
where: { sessionId },
orderBy: { order: 'asc' }
});
}
/**
* Get parsing statistics
*/
export async function getParsingStats() {
const totalSessions = await prisma.session.count();
const sessionsWithTranscripts = await prisma.session.count({
where: {
import: {
rawTranscriptContent: {
not: null
}
}
}
});
const sessionsWithMessages = await prisma.session.count({
where: {
messages: {
some: {}
}
}
});
const totalMessages = await getTotalMessageCount();
return {
totalSessions,
sessionsWithTranscripts,
sessionsWithMessages,
unparsedSessions: sessionsWithTranscripts - sessionsWithMessages,
totalMessages
};
}
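For comparison with the JavaScript parser above, a small sketch of how this removed TypeScript variant behaves when the transcript has only role prefixes and no parseable timestamps. The module path ./transcriptParsing is assumed (the filename is not shown in this hunk) and the sample content is invented.

import { parseTranscriptToMessages } from "./transcriptParsing";

const content = [
  "User: I lost my staff pass.",
  "Assistant: I can help you request a new one.",
].join("\n");
const start = new Date("2025-06-26T21:00:00Z");
const end = new Date("2025-06-26T21:10:00Z");

const parsed = parseTranscriptToMessages(content, start, end);
if (parsed.success && parsed.messages) {
  // Without parseable timestamps the two messages are spread evenly across the
  // session duration, so they land on the start and end times respectively.
  console.log(parsed.messages.map((m) => `${m.role} @ ${m.timestamp.toISOString()}`));
}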

View File

@ -1,5 +1,26 @@
import { Session as NextAuthSession } from "next-auth";
+
+// Standardized enums
+export type SentimentCategory = "positive" | "neutral" | "negative";
+
+export const VALID_CATEGORIES = [
+  "Schedule & Hours",
+  "Leave & Vacation",
+  "Sick Leave & Recovery",
+  "Salary & Compensation",
+  "Contract & Hours",
+  "Onboarding",
+  "Offboarding",
+  "Workwear & Staff Pass",
+  "Team & Contacts",
+  "Personal Questions",
+  "Access & Login",
+  "Social questions",
+  "Unrecognized / Other",
+] as const;
+
+export type ValidCategory = (typeof VALID_CATEGORIES)[number];
+
export interface UserSession extends NextAuthSession {
  user: {
    id?: string;
@ -38,7 +59,7 @@ export interface User {
export interface Message {
  id: string;
  sessionId: string;
-  timestamp: Date | null;
+  timestamp: Date;
  role: string; // "User", "Assistant", "System", etc.
  content: string;
  order: number; // Order within the conversation (0, 1, 2, ...)
@ -54,7 +75,7 @@ export interface ChatSession {
  language?: string | null;
  country?: string | null;
  ipAddress?: string | null;
-  sentiment?: string | null; // Now a SentimentCategory enum: "POSITIVE", "NEUTRAL", "NEGATIVE"
+  sentiment?: string | null;
  messagesSent?: number;
  startTime: Date;
  endTime?: Date | null;
@ -65,11 +86,15 @@
  avgResponseTime?: number | null;
  escalated?: boolean;
  forwardedHr?: boolean;
+  tokens?: number;
+  tokensEur?: number;
  initialMsg?: string;
  fullTranscriptUrl?: string | null;
+  processed?: boolean | null; // Flag for post-processing status
+  validData?: boolean | null; // Flag for data quality (false = exclude from analytics)
+  questions?: string | null; // JSON array of questions asked by user
  summary?: string | null; // Brief summary of the conversation
  messages?: Message[]; // Parsed messages from transcript
+  transcriptContent?: string | null; // Full transcript content
}
export interface SessionQuery { export interface SessionQuery {

1
lib/workflow.ts Normal file
View File

@ -0,0 +1 @@
import { prisma } from "./prisma";
import { processUnprocessedSessions } from "./processingSchedulerNoCron";
import { fileURLToPath } from "url";
import { dirname, join } from "path";
import { readFileSync } from "fs";

const __filename = fileURLToPath(import.meta.url);
const __dirname = dirname(__filename);
const envPath = join(__dirname, "..", ".env.local");

try {
  const envFile = readFileSync(envPath, "utf8");
  const envVars = envFile
    .split("\n")
    .filter((line) => line.trim() && !line.startsWith("#"));
  envVars.forEach((line) => {
    const [key, ...valueParts] = line.split("=");
    if (key && valueParts.length > 0) {
      const value = valueParts.join("=").trim();
      if (!process.env[key.trim()]) {
        process.env[key.trim()] = value;
      }
    }
  });
} catch (error) {
  // .env.local is optional; ignore if it cannot be read
}

async function fetchTranscriptContent(
  url: string,
  username?: string,
  password?: string
): Promise<string | null> {
  try {
    const authHeader =
      username && password
        ? "Basic " + Buffer.from(`${username}:${password}`).toString("base64")
        : undefined;
    const response = await fetch(url, {
      headers: authHeader ? { Authorization: authHeader } : {},
    });
    if (!response.ok) {
      process.stderr.write(`Error fetching transcript: ${response.statusText}\n`);
      return null;
    }
    return await response.text();
  } catch (error) {
    process.stderr.write(`Failed to fetch transcript: ${error}\n`);
    return null;
  }
}

export async function triggerCompleteWorkflow(): Promise<{ message: string }> {
  try {
    const sessionsWithoutMessages = await prisma.session.count({
      where: { messages: { none: {} }, fullTranscriptUrl: { not: null } },
    });
    if (sessionsWithoutMessages > 0) {
      console.log(`[Complete Workflow] Fetching transcripts for ${sessionsWithoutMessages} sessions`);
      const sessionsToProcess = await prisma.session.findMany({
        where: {
          AND: [{ fullTranscriptUrl: { not: null } }, { messages: { none: {} } }],
        },
        include: { company: true },
        take: 20,
      });
      for (const session of sessionsToProcess) {
        try {
          if (!session.fullTranscriptUrl) continue;
          const transcriptContent = await fetchTranscriptContent(
            session.fullTranscriptUrl,
            session.company.csvUsername || undefined,
            session.company.csvPassword || undefined
          );
          if (!transcriptContent) {
            console.log(`No transcript content for session ${session.id}`);
            continue;
          }
          const lines = transcriptContent.split("\n").filter((line) => line.trim());
          const messages: Array<{
            sessionId: string;
            role: string;
            content: string;
            timestamp: Date;
            order: number;
          }> = [];
          let messageOrder = 0;
          for (const line of lines) {
            // Expected format: [timestamp] Role: content
            const timestampMatch = line.match(/^\[([^\]]+)\]\s*([^:]+):\s*(.+)$/);
            if (timestampMatch) {
              const [, timestamp, role, content] = timestampMatch;
              const dateMatch = timestamp.match(
                /^(\d{1,2})-(\d{1,2})-(\d{4}) (\d{1,2}):(\d{1,2}):(\d{1,2})$/
              );
              let parsedTimestamp = new Date();
              if (dateMatch) {
                const [, day, month, year, hour, minute, second] = dateMatch;
                parsedTimestamp = new Date(
                  parseInt(year),
                  parseInt(month) - 1,
                  parseInt(day),
                  parseInt(hour),
                  parseInt(minute),
                  parseInt(second)
                );
              }
              messages.push({
                sessionId: session.id,
                role: role.trim().toLowerCase(),
                content: content.trim(),
                timestamp: parsedTimestamp,
                order: messageOrder++,
              });
            }
          }
          if (messages.length > 0) {
            await prisma.message.createMany({
              data: messages as any,
            });
            console.log(`Added ${messages.length} messages for session ${session.id}`);
          }
        } catch (error) {
          console.error(`Error processing session ${session.id}:`, error);
        }
      }
    }
    const unprocessedWithMessages = await prisma.session.count({
      where: { processed: false, messages: { some: {} } },
    });
    if (unprocessedWithMessages > 0) {
      console.log(`[Complete Workflow] Processing ${unprocessedWithMessages} sessions`);
      await processUnprocessedSessions();
    }
    return { message: `Complete workflow finished successfully` };
  } catch (error) {
    console.error("[Complete Workflow] Error:", error);
    throw error;
  }
}
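A hedged sketch of how an admin endpoint could trigger this workflow, assuming a Next.js App Router handler and the common @/ path alias; the route path below is hypothetical and not part of this diff.

// e.g. app/api/admin/trigger-workflow/route.ts (hypothetical path)
import { NextResponse } from "next/server";
import { triggerCompleteWorkflow } from "@/lib/workflow";

export async function POST() {
  try {
    const result = await triggerCompleteWorkflow();
    return NextResponse.json(result);
  } catch (error) {
    return NextResponse.json(
      { error: error instanceof Error ? error.message : String(error) },
      { status: 500 }
    );
  }
}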

View File

@ -1,129 +0,0 @@
import { PrismaClient, ProcessingStage, ProcessingStatus } from '@prisma/client';
import { ProcessingStatusManager } from './lib/processingStatusManager';
const prisma = new PrismaClient();
async function migrateToRefactoredSystem() {
try {
console.log('=== MIGRATING TO REFACTORED PROCESSING SYSTEM ===\n');
// Get all existing sessions
const sessions = await prisma.session.findMany({
include: {
import: true,
messages: true,
sessionQuestions: true,
},
orderBy: { createdAt: 'asc' }
});
console.log(`Found ${sessions.length} sessions to migrate...\n`);
let migratedCount = 0;
for (const session of sessions) {
console.log(`Migrating session ${session.import?.externalSessionId || session.id}...`);
// Initialize processing status for this session
await ProcessingStatusManager.initializeSession(session.id);
// Determine the current state of each stage based on existing data
// 1. CSV_IMPORT - Always completed if session exists
await ProcessingStatusManager.completeStage(session.id, ProcessingStage.CSV_IMPORT, {
migratedFrom: 'existing_session',
importId: session.importId
});
// 2. TRANSCRIPT_FETCH - Check if transcript content exists
if (session.import?.rawTranscriptContent) {
await ProcessingStatusManager.completeStage(session.id, ProcessingStage.TRANSCRIPT_FETCH, {
migratedFrom: 'existing_transcript',
contentLength: session.import.rawTranscriptContent.length
});
} else if (!session.import?.fullTranscriptUrl) {
// No transcript URL - skip this stage
await ProcessingStatusManager.skipStage(session.id, ProcessingStage.TRANSCRIPT_FETCH, 'No transcript URL in original import');
} else {
// Has URL but no content - mark as pending for retry
console.log(` - Transcript fetch pending for ${session.import.externalSessionId}`);
}
// 3. SESSION_CREATION - Check if messages exist
if (session.messages.length > 0) {
await ProcessingStatusManager.completeStage(session.id, ProcessingStage.SESSION_CREATION, {
migratedFrom: 'existing_messages',
messageCount: session.messages.length
});
} else if (session.import?.rawTranscriptContent) {
// Has transcript but no messages - needs reprocessing
console.log(` - Session creation pending for ${session.import.externalSessionId} (has transcript but no messages)`);
} else {
// No transcript content - skip or mark as pending based on transcript fetch status
if (!session.import?.fullTranscriptUrl) {
await ProcessingStatusManager.skipStage(session.id, ProcessingStage.SESSION_CREATION, 'No transcript content available');
}
}
// 4. AI_ANALYSIS - Check if AI fields are populated
const hasAIAnalysis = session.summary || session.sentiment || session.category || session.language;
if (hasAIAnalysis) {
await ProcessingStatusManager.completeStage(session.id, ProcessingStage.AI_ANALYSIS, {
migratedFrom: 'existing_ai_analysis',
hasSummary: !!session.summary,
hasSentiment: !!session.sentiment,
hasCategory: !!session.category,
hasLanguage: !!session.language
});
} else {
// No AI analysis - mark as pending if session creation is complete
if (session.messages.length > 0) {
console.log(` - AI analysis pending for ${session.import?.externalSessionId}`);
}
}
// 5. QUESTION_EXTRACTION - Check if questions exist
if (session.sessionQuestions.length > 0) {
await ProcessingStatusManager.completeStage(session.id, ProcessingStage.QUESTION_EXTRACTION, {
migratedFrom: 'existing_questions',
questionCount: session.sessionQuestions.length
});
} else {
// No questions - mark as pending if AI analysis is complete
if (hasAIAnalysis) {
console.log(` - Question extraction pending for ${session.import?.externalSessionId}`);
}
}
migratedCount++;
if (migratedCount % 10 === 0) {
console.log(` Migrated ${migratedCount}/${sessions.length} sessions...`);
}
}
console.log(`\n✓ Successfully migrated ${migratedCount} sessions to the new processing system`);
// Show final status
console.log('\n=== MIGRATION COMPLETE - FINAL STATUS ===');
const pipelineStatus = await ProcessingStatusManager.getPipelineStatus();
const stages = ['CSV_IMPORT', 'TRANSCRIPT_FETCH', 'SESSION_CREATION', 'AI_ANALYSIS', 'QUESTION_EXTRACTION'];
for (const stage of stages) {
const stageData = pipelineStatus.pipeline[stage] || {};
const pending = stageData.PENDING || 0;
const completed = stageData.COMPLETED || 0;
const skipped = stageData.SKIPPED || 0;
console.log(`${stage}: ${completed} completed, ${pending} pending, ${skipped} skipped`);
}
} catch (error) {
console.error('Error migrating to refactored system:', error);
} finally {
await prisma.$disconnect();
}
}
migrateToRefactoredSystem();

70
migration.sql Normal file
View File

@ -0,0 +1,70 @@
-- CreateTable
CREATE TABLE "Company" (
"id" TEXT NOT NULL PRIMARY KEY,
"name" TEXT NOT NULL,
"csvUrl" TEXT NOT NULL,
"csvUsername" TEXT,
"csvPassword" TEXT,
"sentimentAlert" REAL,
"dashboardOpts" TEXT,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" DATETIME NOT NULL
);
-- CreateTable
CREATE TABLE "User" (
"id" TEXT NOT NULL PRIMARY KEY,
"email" TEXT NOT NULL,
"password" TEXT NOT NULL,
"companyId" TEXT NOT NULL,
"role" TEXT NOT NULL,
"resetToken" TEXT,
"resetTokenExpiry" DATETIME,
CONSTRAINT "User_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
);
-- CreateTable
CREATE TABLE "Session" (
"id" TEXT NOT NULL PRIMARY KEY,
"companyId" TEXT NOT NULL,
"startTime" DATETIME NOT NULL,
"endTime" DATETIME NOT NULL,
"ipAddress" TEXT,
"country" TEXT,
"language" TEXT,
"messagesSent" INTEGER,
"sentiment" TEXT,
"escalated" BOOLEAN,
"forwardedHr" BOOLEAN,
"fullTranscriptUrl" TEXT,
"avgResponseTime" REAL,
"tokens" INTEGER,
"tokensEur" REAL,
"category" TEXT,
"initialMsg" TEXT,
"processed" BOOLEAN NOT NULL DEFAULT false,
"validData" BOOLEAN NOT NULL DEFAULT true,
"questions" JSONB,
"summary" TEXT,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Session_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
);
-- CreateTable
CREATE TABLE "Message" (
"id" TEXT NOT NULL PRIMARY KEY,
"sessionId" TEXT NOT NULL,
"timestamp" DATETIME NOT NULL,
"role" TEXT NOT NULL,
"content" TEXT NOT NULL,
"order" INTEGER NOT NULL,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Message_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "Session" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- CreateIndex
CREATE UNIQUE INDEX "User_email_key" ON "User"("email");
-- CreateIndex
CREATE INDEX "Message_sessionId_order_idx" ON "Message"("sessionId", "order");

View File

@ -5,9 +5,24 @@ const nextConfig = {
  reactStrictMode: true,
  // Allow cross-origin requests from specific origins in development
  allowedDevOrigins: [
-    "localhost",
-    "127.0.0.1"
+    "127.0.0.1",
+    "localhost"
  ],
+  // Disable Turbopack for now due to EISDIR error on Windows
+  webpack: (config, { isServer }) => {
+    if (!isServer) {
+      config.resolve.fallback = { fs: false, net: false, tls: false };
+    }
+    return config;
+  },
+  experimental: {
+    appDir: true,
+    serverComponentsExternalPackages: ['@prisma/client', 'bcryptjs'],
+    // disable the new Turbopack engine
+    // This is a temporary workaround for the EISDIR error on Windows
+    // Remove this once the issue is resolved in Next.js or Turbopack
+    turbopack: false,
+  },
};

export default nextConfig;

10253
package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@ -5,69 +5,60 @@
"private": true, "private": true,
"scripts": { "scripts": {
"build": "next build", "build": "next build",
"dev": "tsx server.ts", "dev": "next dev",
"dev:next-only": "next dev --turbopack", "dev:with-server": "tsx server.ts",
"format": "npx prettier --write .", "format": "npx prettier --write .",
"format:check": "npx prettier --check .", "format:check": "npx prettier --check .",
"lint": "next lint", "lint": "next lint",
"lint:fix": "npx eslint --fix", "lint:fix": "npx eslint --fix .",
"prisma:generate": "prisma generate", "prisma:generate": "prisma generate",
"prisma:migrate": "prisma migrate dev", "prisma:migrate": "prisma migrate dev",
"prisma:seed": "tsx prisma/seed.ts", "prisma:seed": "tsx prisma/seed.ts",
"prisma:push": "prisma db push", "prisma:push": "prisma db push",
"prisma:push:force": "prisma db push --force-reset",
"prisma:studio": "prisma studio", "prisma:studio": "prisma studio",
"start": "node server.mjs", "start": "tsx server.ts",
"test": "vitest run",
"test:watch": "vitest",
"test:coverage": "vitest run --coverage",
"lint:md": "markdownlint-cli2 \"**/*.md\" \"!.trunk/**\" \"!.venv/**\" \"!node_modules/**\"", "lint:md": "markdownlint-cli2 \"**/*.md\" \"!.trunk/**\" \"!.venv/**\" \"!node_modules/**\"",
"lint:md:fix": "markdownlint-cli2 --fix \"**/*.md\" \"!.trunk/**\" \"!.venv/**\" \"!node_modules/**\"" "lint:md:fix": "markdownlint-cli2 --fix \"**/*.md\" \"!.trunk/**\" \"!.venv/**\" \"!node_modules/**\""
}, },
"dependencies": { "dependencies": {
"@prisma/adapter-pg": "^6.10.1",
"@prisma/client": "^6.10.1", "@prisma/client": "^6.10.1",
"@radix-ui/react-dropdown-menu": "^2.1.15",
"@radix-ui/react-separator": "^1.1.7",
"@radix-ui/react-slot": "^1.2.3",
"@radix-ui/react-tooltip": "^1.2.7",
"@rapideditor/country-coder": "^5.4.0", "@rapideditor/country-coder": "^5.4.0",
"@types/d3": "^7.4.3", "@types/d3": "^7.4.3",
"@types/d3-cloud": "^1.2.9", "@types/d3-cloud": "^1.2.9",
"@types/d3-selection": "^3.0.11",
"@types/geojson": "^7946.0.16", "@types/geojson": "^7946.0.16",
"@types/leaflet": "^1.9.18", "@types/leaflet": "^1.9.18",
"@types/node-fetch": "^2.6.12", "@types/node-fetch": "^2.6.12",
"bcryptjs": "^3.0.2", "bcryptjs": "^3.0.2",
"chart.js": "^4.0.0",
"chartjs-plugin-annotation": "^3.1.0",
"class-variance-authority": "^0.7.1", "class-variance-authority": "^0.7.1",
"clsx": "^2.1.1", "clsx": "^2.1.1",
"csv-parse": "^5.5.0", "csv-parse": "^5.5.0",
"d3": "^7.9.0", "d3": "^7.9.0",
"d3-cloud": "^1.2.7", "d3-cloud": "^1.2.7",
"d3-selection": "^3.0.0",
"i18n-iso-countries": "^7.14.0", "i18n-iso-countries": "^7.14.0",
"iso-639-1": "^3.1.5", "iso-639-1": "^3.1.5",
"leaflet": "^1.9.4", "leaflet": "^1.9.4",
"lucide-react": "^0.525.0", "lucide-react": "^0.523.0",
"next": "^15.3.2", "next": "^15.3.4",
"next-auth": "^4.24.11", "next-auth": "^4.24.11",
"node-cron": "^4.0.7", "node-cron": "^4.0.7",
"node-fetch": "^3.3.2", "node-fetch": "^3.3.2",
"picocolors": "^1.1.1",
"react": "^19.1.0", "react": "^19.1.0",
"react-chartjs-2": "^5.0.0",
"react-dom": "^19.1.0", "react-dom": "^19.1.0",
"react-leaflet": "^5.0.0", "react-leaflet": "^5.0.0",
"react-markdown": "^10.1.0", "react-markdown": "^10.1.0",
"recharts": "^3.0.2",
"rehype-raw": "^7.0.0", "rehype-raw": "^7.0.0",
"source-map-js": "^1.2.1",
"tailwind-merge": "^3.3.1" "tailwind-merge": "^3.3.1"
}, },
"devDependencies": { "devDependencies": {
"@eslint/eslintrc": "^3.3.1", "@eslint/eslintrc": "^3.3.1",
"@eslint/js": "^9.27.0", "@eslint/js": "^9.27.0",
"@playwright/test": "^1.52.0", "@playwright/test": "^1.52.0",
"@tailwindcss/postcss": "^4.1.11", "@tailwindcss/postcss": "^4.1.7",
"@testing-library/dom": "^10.4.0",
"@testing-library/react": "^16.3.0",
"@types/bcryptjs": "^2.4.2", "@types/bcryptjs": "^2.4.2",
"@types/node": "^22.15.21", "@types/node": "^22.15.21",
"@types/node-cron": "^3.0.8", "@types/node-cron": "^3.0.8",
@ -75,24 +66,19 @@
"@types/react-dom": "^19.1.5", "@types/react-dom": "^19.1.5",
"@typescript-eslint/eslint-plugin": "^8.32.1", "@typescript-eslint/eslint-plugin": "^8.32.1",
"@typescript-eslint/parser": "^8.32.1", "@typescript-eslint/parser": "^8.32.1",
"@vitejs/plugin-react": "^4.6.0",
"@vitest/coverage-v8": "^3.2.4",
"eslint": "^9.27.0", "eslint": "^9.27.0",
"eslint-config-next": "^15.3.2", "eslint-config-next": "^15.3.2",
"eslint-plugin-prettier": "^5.4.0", "eslint-plugin-prettier": "^5.4.0",
"jsdom": "^26.1.0",
"markdownlint-cli2": "^0.18.1", "markdownlint-cli2": "^0.18.1",
"postcss": "^8.5.3", "postcss": "^8.5.3",
"prettier": "^3.5.3", "prettier": "^3.5.3",
"prettier-plugin-jinja-template": "^2.1.0", "prettier-plugin-jinja-template": "^2.1.0",
"prisma": "^6.10.1", "prisma": "^6.10.1",
"tailwindcss": "^4.1.11", "tailwindcss": "^4.1.7",
"ts-node": "^10.9.2", "ts-node": "^10.9.2",
"tsx": "^4.20.3", "tsx": "^4.20.3",
"tw-animate-css": "^1.3.4", "tw-animate-css": "^1.3.4",
"typescript": "^5.0.0", "typescript": "^5.0.0"
"vite-tsconfig-paths": "^5.1.4",
"vitest": "^3.2.4"
}, },
"prettier": { "prettier": {
"bracketSpacing": true, "bracketSpacing": true,
@ -142,6 +128,5 @@
".git", ".git",
"*.json" "*.json"
] ]
}, }
"packageManager": "pnpm@10.12.4"
} }

8994
pnpm-lock.yaml generated

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,70 @@
-- CreateTable
CREATE TABLE "Company" (
"id" TEXT NOT NULL PRIMARY KEY,
"name" TEXT NOT NULL,
"csvUrl" TEXT NOT NULL,
"csvUsername" TEXT,
"csvPassword" TEXT,
"sentimentAlert" REAL,
"dashboardOpts" TEXT,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" DATETIME NOT NULL
);
-- CreateTable
CREATE TABLE "User" (
"id" TEXT NOT NULL PRIMARY KEY,
"email" TEXT NOT NULL,
"password" TEXT NOT NULL,
"companyId" TEXT NOT NULL,
"role" TEXT NOT NULL,
"resetToken" TEXT,
"resetTokenExpiry" DATETIME,
CONSTRAINT "User_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
);
-- CreateTable
CREATE TABLE "Session" (
"id" TEXT NOT NULL PRIMARY KEY,
"companyId" TEXT NOT NULL,
"startTime" DATETIME NOT NULL,
"endTime" DATETIME NOT NULL,
"ipAddress" TEXT,
"country" TEXT,
"language" TEXT,
"messagesSent" INTEGER,
"sentiment" REAL,
"sentimentCategory" TEXT,
"escalated" BOOLEAN,
"forwardedHr" BOOLEAN,
"fullTranscriptUrl" TEXT,
"avgResponseTime" REAL,
"tokens" INTEGER,
"tokensEur" REAL,
"category" TEXT,
"initialMsg" TEXT,
"processed" BOOLEAN NOT NULL DEFAULT false,
"validData" BOOLEAN NOT NULL DEFAULT true,
"questions" TEXT,
"summary" TEXT,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Session_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company" ("id") ON DELETE RESTRICT ON UPDATE CASCADE
);
-- CreateTable
CREATE TABLE "Message" (
"id" TEXT NOT NULL PRIMARY KEY,
"sessionId" TEXT NOT NULL,
"timestamp" DATETIME NOT NULL,
"role" TEXT NOT NULL,
"content" TEXT NOT NULL,
"order" INTEGER NOT NULL,
"createdAt" DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Message_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "Session" ("id") ON DELETE CASCADE ON UPDATE CASCADE
);
-- CreateIndex
CREATE UNIQUE INDEX "User_email_key" ON "User"("email");
-- CreateIndex
CREATE INDEX "Message_sessionId_order_idx" ON "Message"("sessionId", "order");

View File

@ -1,227 +0,0 @@
-- CreateEnum
CREATE TYPE "UserRole" AS ENUM ('ADMIN', 'USER', 'AUDITOR');
-- CreateEnum
CREATE TYPE "SentimentCategory" AS ENUM ('POSITIVE', 'NEUTRAL', 'NEGATIVE');
-- CreateEnum
CREATE TYPE "SessionCategory" AS ENUM ('SCHEDULE_HOURS', 'LEAVE_VACATION', 'SICK_LEAVE_RECOVERY', 'SALARY_COMPENSATION', 'CONTRACT_HOURS', 'ONBOARDING', 'OFFBOARDING', 'WORKWEAR_STAFF_PASS', 'TEAM_CONTACTS', 'PERSONAL_QUESTIONS', 'ACCESS_LOGIN', 'SOCIAL_QUESTIONS', 'UNRECOGNIZED_OTHER');
-- CreateEnum
CREATE TYPE "ImportStatus" AS ENUM ('QUEUED', 'PROCESSING', 'DONE', 'ERROR');
-- CreateTable
CREATE TABLE "Company" (
"id" TEXT NOT NULL,
"name" TEXT NOT NULL,
"csvUrl" TEXT NOT NULL,
"csvUsername" TEXT,
"csvPassword" TEXT,
"sentimentAlert" DOUBLE PRECISION,
"dashboardOpts" JSONB,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "Company_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "User" (
"id" TEXT NOT NULL,
"email" TEXT NOT NULL,
"password" TEXT NOT NULL,
"role" "UserRole" NOT NULL DEFAULT 'USER',
"companyId" TEXT NOT NULL,
"resetToken" TEXT,
"resetTokenExpiry" TIMESTAMP(3),
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "User_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Session" (
"id" TEXT NOT NULL,
"companyId" TEXT NOT NULL,
"importId" TEXT,
"startTime" TIMESTAMP(3) NOT NULL,
"endTime" TIMESTAMP(3) NOT NULL,
"ipAddress" TEXT,
"country" TEXT,
"fullTranscriptUrl" TEXT,
"avgResponseTime" DOUBLE PRECISION,
"initialMsg" TEXT,
"language" TEXT,
"messagesSent" INTEGER,
"sentiment" "SentimentCategory",
"escalated" BOOLEAN,
"forwardedHr" BOOLEAN,
"category" "SessionCategory",
"summary" TEXT,
"processed" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "Session_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "SessionImport" (
"id" TEXT NOT NULL,
"companyId" TEXT NOT NULL,
"externalSessionId" TEXT NOT NULL,
"startTimeRaw" TEXT NOT NULL,
"endTimeRaw" TEXT NOT NULL,
"ipAddress" TEXT,
"countryCode" TEXT,
"language" TEXT,
"messagesSent" INTEGER,
"sentimentRaw" TEXT,
"escalatedRaw" TEXT,
"forwardedHrRaw" TEXT,
"fullTranscriptUrl" TEXT,
"avgResponseTimeSeconds" DOUBLE PRECISION,
"tokens" INTEGER,
"tokensEur" DOUBLE PRECISION,
"category" TEXT,
"initialMessage" TEXT,
"rawTranscriptContent" TEXT,
"status" "ImportStatus" NOT NULL DEFAULT 'QUEUED',
"errorMsg" TEXT,
"processedAt" TIMESTAMP(3),
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "SessionImport_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Message" (
"id" TEXT NOT NULL,
"sessionId" TEXT NOT NULL,
"timestamp" TIMESTAMP(3),
"role" TEXT NOT NULL,
"content" TEXT NOT NULL,
"order" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Message_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "Question" (
"id" TEXT NOT NULL,
"content" TEXT NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "Question_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "SessionQuestion" (
"id" TEXT NOT NULL,
"sessionId" TEXT NOT NULL,
"questionId" TEXT NOT NULL,
"order" INTEGER NOT NULL,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "SessionQuestion_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "AIProcessingRequest" (
"id" TEXT NOT NULL,
"sessionId" TEXT NOT NULL,
"openaiRequestId" TEXT,
"model" TEXT NOT NULL,
"serviceTier" TEXT,
"systemFingerprint" TEXT,
"promptTokens" INTEGER NOT NULL,
"completionTokens" INTEGER NOT NULL,
"totalTokens" INTEGER NOT NULL,
"cachedTokens" INTEGER,
"audioTokensPrompt" INTEGER,
"reasoningTokens" INTEGER,
"audioTokensCompletion" INTEGER,
"acceptedPredictionTokens" INTEGER,
"rejectedPredictionTokens" INTEGER,
"promptTokenCost" DOUBLE PRECISION NOT NULL,
"completionTokenCost" DOUBLE PRECISION NOT NULL,
"totalCostEur" DOUBLE PRECISION NOT NULL,
"processingType" TEXT NOT NULL,
"success" BOOLEAN NOT NULL,
"errorMessage" TEXT,
"requestedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"completedAt" TIMESTAMP(3),
CONSTRAINT "AIProcessingRequest_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "User_email_key" ON "User"("email");
-- CreateIndex
CREATE UNIQUE INDEX "Session_importId_key" ON "Session"("importId");
-- CreateIndex
CREATE INDEX "Session_companyId_startTime_idx" ON "Session"("companyId", "startTime");
-- CreateIndex
CREATE UNIQUE INDEX "SessionImport_externalSessionId_key" ON "SessionImport"("externalSessionId");
-- CreateIndex
CREATE INDEX "SessionImport_status_idx" ON "SessionImport"("status");
-- CreateIndex
CREATE UNIQUE INDEX "SessionImport_companyId_externalSessionId_key" ON "SessionImport"("companyId", "externalSessionId");
-- CreateIndex
CREATE INDEX "Message_sessionId_order_idx" ON "Message"("sessionId", "order");
-- CreateIndex
CREATE UNIQUE INDEX "Message_sessionId_order_key" ON "Message"("sessionId", "order");
-- CreateIndex
CREATE UNIQUE INDEX "Question_content_key" ON "Question"("content");
-- CreateIndex
CREATE INDEX "SessionQuestion_sessionId_idx" ON "SessionQuestion"("sessionId");
-- CreateIndex
CREATE UNIQUE INDEX "SessionQuestion_sessionId_questionId_key" ON "SessionQuestion"("sessionId", "questionId");
-- CreateIndex
CREATE UNIQUE INDEX "SessionQuestion_sessionId_order_key" ON "SessionQuestion"("sessionId", "order");
-- CreateIndex
CREATE INDEX "AIProcessingRequest_sessionId_idx" ON "AIProcessingRequest"("sessionId");
-- CreateIndex
CREATE INDEX "AIProcessingRequest_requestedAt_idx" ON "AIProcessingRequest"("requestedAt");
-- CreateIndex
CREATE INDEX "AIProcessingRequest_model_idx" ON "AIProcessingRequest"("model");
-- AddForeignKey
ALTER TABLE "User" ADD CONSTRAINT "User_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Session" ADD CONSTRAINT "Session_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Session" ADD CONSTRAINT "Session_importId_fkey" FOREIGN KEY ("importId") REFERENCES "SessionImport"("id") ON DELETE SET NULL ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "SessionImport" ADD CONSTRAINT "SessionImport_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "Message" ADD CONSTRAINT "Message_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "Session"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "SessionQuestion" ADD CONSTRAINT "SessionQuestion_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "Session"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "SessionQuestion" ADD CONSTRAINT "SessionQuestion_questionId_fkey" FOREIGN KEY ("questionId") REFERENCES "Question"("id") ON DELETE RESTRICT ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "AIProcessingRequest" ADD CONSTRAINT "AIProcessingRequest_sessionId_fkey" FOREIGN KEY ("sessionId") REFERENCES "Session"("id") ON DELETE CASCADE ON UPDATE CASCADE;


@@ -1,63 +0,0 @@
-- CreateTable
CREATE TABLE "AIModel" (
"id" TEXT NOT NULL,
"name" TEXT NOT NULL,
"provider" TEXT NOT NULL,
"maxTokens" INTEGER,
"isActive" BOOLEAN NOT NULL DEFAULT true,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
"updatedAt" TIMESTAMP(3) NOT NULL,
CONSTRAINT "AIModel_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "AIModelPricing" (
"id" TEXT NOT NULL,
"aiModelId" TEXT NOT NULL,
"promptTokenCost" DOUBLE PRECISION NOT NULL,
"completionTokenCost" DOUBLE PRECISION NOT NULL,
"effectiveFrom" TIMESTAMP(3) NOT NULL,
"effectiveUntil" TIMESTAMP(3),
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "AIModelPricing_pkey" PRIMARY KEY ("id")
);
-- CreateTable
CREATE TABLE "CompanyAIModel" (
"id" TEXT NOT NULL,
"companyId" TEXT NOT NULL,
"aiModelId" TEXT NOT NULL,
"isDefault" BOOLEAN NOT NULL DEFAULT false,
"createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP,
CONSTRAINT "CompanyAIModel_pkey" PRIMARY KEY ("id")
);
-- CreateIndex
CREATE UNIQUE INDEX "AIModel_name_key" ON "AIModel"("name");
-- CreateIndex
CREATE INDEX "AIModel_provider_isActive_idx" ON "AIModel"("provider", "isActive");
-- CreateIndex
CREATE INDEX "AIModelPricing_aiModelId_effectiveFrom_idx" ON "AIModelPricing"("aiModelId", "effectiveFrom");
-- CreateIndex
CREATE INDEX "AIModelPricing_effectiveFrom_effectiveUntil_idx" ON "AIModelPricing"("effectiveFrom", "effectiveUntil");
-- CreateIndex
CREATE INDEX "CompanyAIModel_companyId_isDefault_idx" ON "CompanyAIModel"("companyId", "isDefault");
-- CreateIndex
CREATE UNIQUE INDEX "CompanyAIModel_companyId_aiModelId_key" ON "CompanyAIModel"("companyId", "aiModelId");
-- AddForeignKey
ALTER TABLE "AIModelPricing" ADD CONSTRAINT "AIModelPricing_aiModelId_fkey" FOREIGN KEY ("aiModelId") REFERENCES "AIModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "CompanyAIModel" ADD CONSTRAINT "CompanyAIModel_companyId_fkey" FOREIGN KEY ("companyId") REFERENCES "Company"("id") ON DELETE CASCADE ON UPDATE CASCADE;
-- AddForeignKey
ALTER TABLE "CompanyAIModel" ADD CONSTRAINT "CompanyAIModel_aiModelId_fkey" FOREIGN KEY ("aiModelId") REFERENCES "AIModel"("id") ON DELETE CASCADE ON UPDATE CASCADE;


@@ -1,3 +1,3 @@
 # Please do not edit this file manually
 # It should be added in your version-control system (e.g., Git)
-provider = "postgresql"
+provider = "sqlite"


@@ -1,376 +1,74 @@
+// Database schema, one company = one org, linked to users and CSV config
 generator client {
   provider = "prisma-client-js"
-  previewFeatures = ["driverAdapters"]
 }

 datasource db {
-  provider  = "postgresql"
-  url       = env("DATABASE_URL")
-  directUrl = env("DATABASE_URL_DIRECT")
+  provider = "sqlite"
+  url      = "file:./dev.db"
 }
/**
* ENUMS fewer magic strings
*/
enum UserRole {
ADMIN
USER
AUDITOR
}
enum SentimentCategory {
POSITIVE
NEUTRAL
NEGATIVE
}
enum SessionCategory {
SCHEDULE_HOURS
LEAVE_VACATION
SICK_LEAVE_RECOVERY
SALARY_COMPENSATION
CONTRACT_HOURS
ONBOARDING
OFFBOARDING
WORKWEAR_STAFF_PASS
TEAM_CONTACTS
PERSONAL_QUESTIONS
ACCESS_LOGIN
SOCIAL_QUESTIONS
UNRECOGNIZED_OTHER
}
enum ProcessingStage {
CSV_IMPORT // SessionImport created
TRANSCRIPT_FETCH // Transcript content fetched
SESSION_CREATION // Session + Messages created
AI_ANALYSIS // AI processing completed
QUESTION_EXTRACTION // Questions extracted
}
enum ProcessingStatus {
PENDING
IN_PROGRESS
COMPLETED
FAILED
SKIPPED
}
/**
* COMPANY (multi-tenant root)
*/
 model Company {
   id   String @id @default(uuid())
   name String
-  csvUrl      String
-  csvUsername String?
+  csvUrl      String  // where to fetch CSV
+  csvUsername String? // for basic auth
   csvPassword String?
-  sentimentAlert  Float?
-  dashboardOpts   Json?            // JSON column instead of opaque string
-  users           User[]           @relation("CompanyUsers")
-  sessions        Session[]
-  imports         SessionImport[]
-  companyAiModels CompanyAIModel[]
-  createdAt       DateTime @default(now())
-  updatedAt       DateTime @updatedAt
+  sentimentAlert Float?  // e.g. alert threshold for negative chats
+  dashboardOpts  String? // JSON blob for per-company dashboard preferences
+  users     User[]
+  sessions  Session[]
+  createdAt DateTime @default(now())
+  updatedAt DateTime @updatedAt
 }
/**
* USER (auth accounts)
*/
 model User {
   id    String @id @default(uuid())
   email String @unique
-  password String
-  role     UserRole @default(USER)
-  company  Company  @relation("CompanyUsers", fields: [companyId], references: [id], onDelete: Cascade)
+  password  String  // hashed, use bcrypt
+  company   Company @relation(fields: [companyId], references: [id])
   companyId String
+  role      String  // 'admin' | 'user' | 'auditor'
   resetToken       String?
   resetTokenExpiry DateTime?
-  createdAt        DateTime @default(now())
-  updatedAt        DateTime @updatedAt
 }
/**
* SESSION ↔ SESSIONIMPORT (1-to-1)
*/
/**
* 1. Normalised session ---------------------------
*/
 model Session {
-  id        String  @id @default(uuid())
-  company   Company @relation(fields: [companyId], references: [id], onDelete: Cascade)
+  id        String  @id
+  company   Company @relation(fields: [companyId], references: [id])
   companyId String
-  /**
-   * 1-to-1 link back to the import row
-   */
-  import   SessionImport? @relation("ImportToSession", fields: [importId], references: [id])
-  importId String?        @unique
-  /**
-   * session-level data (processed from SessionImport)
-   */
-  startTime DateTime
-  endTime   DateTime
-  // Direct copies from SessionImport (minimal processing)
-  ipAddress         String?
-  country           String? // from countryCode
-  fullTranscriptUrl String?
-  avgResponseTime   Float?  // from avgResponseTimeSeconds
-  initialMsg        String? // from initialMessage
-  // AI-processed fields (calculated from Messages or AI analysis)
-  language     String?            // AI-detected from Messages
-  messagesSent Int?               // Calculated from Message count
-  sentiment    SentimentCategory? // AI-analyzed (changed from Float to enum)
-  escalated    Boolean?           // AI-detected
-  forwardedHr  Boolean?           // AI-detected
-  category     SessionCategory?   // AI-categorized (changed to enum)
-  // AI-generated fields
-  summary String? // AI-generated summary
-  /**
-   * Relationships
-   */
-  messages             Message[]                 // Individual conversation messages
-  sessionQuestions     SessionQuestion[]         // Questions asked in this session
-  aiProcessingRequests AIProcessingRequest[]     // AI processing cost tracking
-  processingStatus     SessionProcessingStatus[] // Processing pipeline status
-  createdAt DateTime @default(now())
-  updatedAt DateTime @updatedAt
-  @@index([companyId, startTime])
+  startTime         DateTime
+  endTime           DateTime?
+  ipAddress         String?
+  country           String?
+  language          String?
+  messagesSent      Int?
+  sentiment         String?   // "positive", "neutral", or "negative"
+  escalated         Boolean?
+  forwardedHr       Boolean?
+  fullTranscriptUrl String?
+  avgResponseTime   Float?
+  tokens            Int?
+  tokensEur         Float?
+  category          String?
+  initialMsg        String?
+  processed         Boolean   @default(false)
+  validData         Boolean   @default(true)
+  questions         Json?
+  summary           String?
+  messages          Message[]
+  createdAt         DateTime  @default(now())
 }
/**
* 2. Raw CSV row (pure data storage) ----------
*/
model SessionImport {
id String @id @default(uuid())
company Company @relation(fields: [companyId], references: [id], onDelete: Cascade)
companyId String
/**
* 1-to-1 back-relation; NO fields/references here
*/
session Session? @relation("ImportToSession")
// ─── 16 CSV columns 1-to-1 ────────────────────────
externalSessionId String @unique // value from CSV column 1
startTimeRaw String
endTimeRaw String
ipAddress String?
countryCode String?
language String?
messagesSent Int?
sentimentRaw String?
escalatedRaw String?
forwardedHrRaw String?
fullTranscriptUrl String?
avgResponseTimeSeconds Float?
tokens Int?
tokensEur Float?
category String?
initialMessage String?
// ─── Raw transcript content ─────────────────────────
rawTranscriptContent String? // Fetched content from fullTranscriptUrl
// ─── bookkeeping ─────────────────────────────────
createdAt DateTime @default(now())
@@unique([companyId, externalSessionId]) // idempotent re-imports
}
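
Note: the @@unique([companyId, externalSessionId]) constraint on the (removed) SessionImport model above is what made CSV re-imports idempotent. A minimal sketch of how an importer can lean on it, assuming Prisma's default generated name for the compound key (companyId_externalSessionId) and a hypothetical CsvRow shape for one parsed record; illustrative only, not code from this diff:

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Hypothetical shape of one parsed CSV record; field names are assumptions.
interface CsvRow {
  companyId: string;
  externalSessionId: string;
  startTimeRaw: string;
  endTimeRaw: string;
}

// Re-running the same CSV is safe: the compound unique key turns a duplicate
// row into an update instead of a second SessionImport.
async function upsertImport(row: CsvRow) {
  return prisma.sessionImport.upsert({
    where: {
      companyId_externalSessionId: {
        companyId: row.companyId,
        externalSessionId: row.externalSessionId,
      },
    },
    create: { ...row },
    update: { startTimeRaw: row.startTimeRaw, endTimeRaw: row.endTimeRaw },
  });
}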
/**
* MESSAGE (individual lines)
*/
 model Message {
   id        String  @id @default(uuid())
   session   Session @relation(fields: [sessionId], references: [id], onDelete: Cascade)
   sessionId String
-  timestamp DateTime?
-  role      String // "user" | "assistant" | "system"  free-form keeps migration easy
-  content   String
-  order     Int
-  createdAt DateTime @default(now())
-  @@unique([sessionId, order]) // guards against duplicate order values
-  @@index([sessionId, order])
-}
+  timestamp DateTime // When the message was sent
+  role      String   // "User", "Assistant", "System", etc.
+  content   String   // The message content
+  order     Int      // Order within the conversation (0, 1, 2, ...)
+  createdAt DateTime @default(now())
+  @@index([sessionId, order]) // Index for efficient ordering queries
+}
/**
* UNIFIED PROCESSING STATUS TRACKING
*/
model SessionProcessingStatus {
id String @id @default(uuid())
sessionId String
stage ProcessingStage
status ProcessingStatus @default(PENDING)
startedAt DateTime?
completedAt DateTime?
errorMessage String?
retryCount Int @default(0)
// Stage-specific metadata (e.g., AI costs, token usage, fetch details)
metadata Json?
session Session @relation(fields: [sessionId], references: [id], onDelete: Cascade)
@@unique([sessionId, stage])
@@index([stage, status])
@@index([sessionId])
}
/**
* QUESTION MANAGEMENT (separate from Session for better analytics)
*/
model Question {
id String @id @default(uuid())
content String @unique // The actual question text
createdAt DateTime @default(now())
// Relationships
sessionQuestions SessionQuestion[]
}
model SessionQuestion {
id String @id @default(uuid())
sessionId String
questionId String
order Int // Order within the session
createdAt DateTime @default(now())
// Relationships
session Session @relation(fields: [sessionId], references: [id], onDelete: Cascade)
question Question @relation(fields: [questionId], references: [id])
@@unique([sessionId, questionId]) // Prevent duplicate questions per session
@@unique([sessionId, order]) // Ensure unique ordering
@@index([sessionId])
}
/**
* AI PROCESSING COST TRACKING
*/
model AIProcessingRequest {
id String @id @default(uuid())
sessionId String
// OpenAI Request Details
openaiRequestId String? // "chatcmpl-Bn8IH9UM8t7luZVWnwZG7CVJ0kjPo"
model String // "gpt-4o-2024-08-06"
serviceTier String? // "default"
systemFingerprint String? // "fp_07871e2ad8"
// Token Usage (from usage object)
promptTokens Int // 11
completionTokens Int // 9
totalTokens Int // 20
// Detailed Token Breakdown
cachedTokens Int? // prompt_tokens_details.cached_tokens
audioTokensPrompt Int? // prompt_tokens_details.audio_tokens
reasoningTokens Int? // completion_tokens_details.reasoning_tokens
audioTokensCompletion Int? // completion_tokens_details.audio_tokens
acceptedPredictionTokens Int? // completion_tokens_details.accepted_prediction_tokens
rejectedPredictionTokens Int? // completion_tokens_details.rejected_prediction_tokens
// Cost Calculation
promptTokenCost Float // Cost per prompt token (varies by model)
completionTokenCost Float // Cost per completion token (varies by model)
totalCostEur Float // Calculated total cost in EUR
// Processing Context
processingType String // "session_analysis", "reprocessing", etc.
success Boolean // Whether the request succeeded
errorMessage String? // If failed, what went wrong
// Timestamps
requestedAt DateTime @default(now())
completedAt DateTime?
// Relationships
session Session @relation(fields: [sessionId], references: [id], onDelete: Cascade)
@@index([sessionId])
@@index([requestedAt])
@@index([model])
}
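
Note: the cost fields on the (removed) AIProcessingRequest model above, promptTokenCost, completionTokenCost, and totalCostEur, imply a straightforward multiply-and-sum. A minimal sketch of that calculation, assuming prices are stored per single token as in the removed seed.ts further down; illustrative only, not code from this diff:

// Computes the figure an AIProcessingRequest row would store in totalCostEur,
// assuming per-single-token prices (as in the seed data).
interface Usage {
  promptTokens: number;
  completionTokens: number;
}

interface Pricing {
  promptTokenCost: number; // price per prompt token
  completionTokenCost: number; // price per completion token
}

function totalCost(usage: Usage, pricing: Pricing): number {
  return (
    usage.promptTokens * pricing.promptTokenCost +
    usage.completionTokens * pricing.completionTokenCost
  );
}

// Example with the gpt-4o prices seeded below: 11 prompt + 9 completion tokens.
const example = totalCost(
  { promptTokens: 11, completionTokens: 9 },
  { promptTokenCost: 0.0000025, completionTokenCost: 0.00001 }
);
// example is approximately 0.0001175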
/**
* AI MODEL MANAGEMENT SYSTEM
*/
/**
* AI Model definitions (without pricing)
*/
model AIModel {
id String @id @default(uuid())
name String @unique // "gpt-4o", "gpt-4-turbo", etc.
provider String // "openai", "anthropic", etc.
maxTokens Int? // Maximum tokens for this model
isActive Boolean @default(true)
// Relationships
pricing AIModelPricing[]
companyModels CompanyAIModel[]
createdAt DateTime @default(now())
updatedAt DateTime @updatedAt
@@index([provider, isActive])
}
/**
* Time-based pricing for AI models
*/
model AIModelPricing {
id String @id @default(uuid())
aiModelId String
promptTokenCost Float // Cost per prompt token in USD
completionTokenCost Float // Cost per completion token in USD
effectiveFrom DateTime // When this pricing becomes effective
effectiveUntil DateTime? // When this pricing expires (null = current)
// Relationships
aiModel AIModel @relation(fields: [aiModelId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
@@index([aiModelId, effectiveFrom])
@@index([effectiveFrom, effectiveUntil])
}
/**
* Company-specific AI model assignments
*/
model CompanyAIModel {
id String @id @default(uuid())
companyId String
aiModelId String
isDefault Boolean @default(false) // Is this the default model for the company?
// Relationships
company Company @relation(fields: [companyId], references: [id], onDelete: Cascade)
aiModel AIModel @relation(fields: [aiModelId], references: [id], onDelete: Cascade)
createdAt DateTime @default(now())
@@unique([companyId, aiModelId]) // Prevent duplicate assignments
@@index([companyId, isDefault])
}
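
Note: the new Message model keeps a composite index on (sessionId, order) for ordered transcript reads. A minimal sketch of the query shape that index serves, assuming the Prisma client generated from the SQLite schema above; illustrative, not code from this diff:

import { PrismaClient } from "@prisma/client";

const prisma = new PrismaClient();

// Loads one session together with its transcript in conversation order.
// The @@index([sessionId, order]) on Message backs the orderBy efficiently.
async function getSessionWithTranscript(sessionId: string) {
  return prisma.session.findUnique({
    where: { id: sessionId },
    include: {
      messages: { orderBy: { order: "asc" } },
    },
  });
}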

prisma/seed.js (new file)

@@ -0,0 +1,39 @@
// seed.js - Create initial admin user and company
import { PrismaClient } from "@prisma/client";
import bcrypt from "bcryptjs";
const prisma = new PrismaClient();
async function main() {
// Create a company
const company = await prisma.company.create({
data: {
name: "Demo Company",
csvUrl: "https://example.com/data.csv", // Replace with a real URL if available
},
});
// Create an admin user
const hashedPassword = await bcrypt.hash("admin123", 10);
await prisma.user.create({
data: {
email: "admin@demo.com",
password: hashedPassword,
role: "admin",
companyId: company.id,
},
});
console.log("Seed data created successfully:");
console.log("Company: Demo Company");
console.log("Admin user: admin@demo.com (password: admin123)");
}
main()
.catch((e) => {
console.error("Error seeding database:", e);
process.exit(1);
})
.finally(async () => {
await prisma.$disconnect();
});
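
Note: the seed stores only a bcrypt hash of the demo password. A minimal sketch of the matching check a login route would perform with the same bcryptjs API; the verifyLogin helper is an assumption for illustration, not part of this diff:

import { PrismaClient } from "@prisma/client";
import bcrypt from "bcryptjs";

const prisma = new PrismaClient();

// Returns the user when the email exists and the password matches the stored hash.
async function verifyLogin(email: string, password: string) {
  const user = await prisma.user.findUnique({ where: { email } });
  if (!user) return null;
  const ok = await bcrypt.compare(password, user.password);
  return ok ? user : null;
}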


@@ -1,4 +1,4 @@
-// seed.ts - Create initial admin user, company, and AI models
+// seed.ts - Create initial admin user and company
 import { PrismaClient } from "@prisma/client";
 import bcrypt from "bcryptjs";
@@ -6,133 +6,30 @@ const prisma = new PrismaClient();
 async function main() {
   try {
-    console.log("🌱 Starting database seeding...");
-    // Create the Jumbo company
+    // Create a company
     const company = await prisma.company.create({
       data: {
-        name: "Jumbo Bas Bobbeldijk",
-        csvUrl: "https://proto.notso.ai/jumbo/chats",
-        csvUsername: "jumboadmin",
-        csvPassword: "jumboadmin",
+        name: "Demo Company",
+        csvUrl: "https://example.com/data.csv", // Replace with a real URL if available
       },
     });
-    console.log(`✅ Created company: ${company.name}`);
-    // Create admin user
-    const hashedPassword = await bcrypt.hash("8QbL26tB7fWS", 10);
-    const adminUser = await prisma.user.create({
+    // Create an admin user
+    const hashedPassword = await bcrypt.hash("admin123", 10);
+    await prisma.user.create({
      data: {
-        email: "max.kowalski.contact@gmail.com",
+        email: "admin@demo.com",
         password: hashedPassword,
-        role: "ADMIN",
+        role: "admin",
         companyId: company.id,
       },
     });
-    console.log(`✅ Created admin user: ${adminUser.email}`);
-    // Create AI Models
const aiModels = [
{
name: "gpt-4o",
provider: "openai",
maxTokens: 128000,
isActive: true,
},
{
name: "gpt-4o-2024-08-06",
provider: "openai",
maxTokens: 128000,
isActive: true,
},
{
name: "gpt-4-turbo",
provider: "openai",
maxTokens: 128000,
isActive: true,
},
{
name: "gpt-4o-mini",
provider: "openai",
maxTokens: 128000,
isActive: true,
},
];
const createdModels: any[] = [];
for (const modelData of aiModels) {
const model = await prisma.aIModel.create({
data: modelData,
});
createdModels.push(model);
console.log(`✅ Created AI model: ${model.name}`);
}
// Create current pricing for AI models (as of December 2024)
const currentTime = new Date();
const pricingData = [
{
modelName: "gpt-4o",
promptTokenCost: 0.0000025, // $2.50 per 1M tokens
completionTokenCost: 0.00001, // $10.00 per 1M tokens
},
{
modelName: "gpt-4o-2024-08-06",
promptTokenCost: 0.0000025, // $2.50 per 1M tokens
completionTokenCost: 0.00001, // $10.00 per 1M tokens
},
{
modelName: "gpt-4-turbo",
promptTokenCost: 0.00001, // $10.00 per 1M tokens
completionTokenCost: 0.00003, // $30.00 per 1M tokens
},
{
modelName: "gpt-4o-mini",
promptTokenCost: 0.00000015, // $0.15 per 1M tokens
completionTokenCost: 0.0000006, // $0.60 per 1M tokens
},
];
for (const pricing of pricingData) {
const model = createdModels.find(m => m.name === pricing.modelName);
if (model) {
await prisma.aIModelPricing.create({
data: {
aiModelId: model.id,
promptTokenCost: pricing.promptTokenCost,
completionTokenCost: pricing.completionTokenCost,
effectiveFrom: currentTime,
effectiveUntil: null, // Current pricing
},
});
console.log(`✅ Created pricing for: ${model.name}`);
}
}
// Assign default AI model to company (gpt-4o)
const defaultModel = createdModels.find(m => m.name === "gpt-4o");
if (defaultModel) {
await prisma.companyAIModel.create({
data: {
companyId: company.id,
aiModelId: defaultModel.id,
isDefault: true,
},
});
console.log(`✅ Set default AI model for company: ${defaultModel.name}`);
}
console.log("\n🎉 Database seeding completed successfully!");
console.log("\n📋 Summary:");
console.log(`Company: ${company.name}`);
console.log(`Admin user: ${adminUser.email}`);
console.log(`Password: 8QbL26tB7fWS`);
console.log(`AI Models: ${createdModels.length} models created with current pricing`);
console.log(`Default model: ${defaultModel?.name}`);
console.log("\n🚀 Ready to start importing CSV data!");
console.log("Seed data created successfully:");
console.log("Company: Demo Company");
console.log("Admin user: admin@demo.com (password: admin123)");
} catch (error) { } catch (error) {
console.error("Error seeding database:", error); console.error("Error seeding database:", error);
process.exit(1); process.exit(1);
} finally { } finally {
await prisma.$disconnect(); await prisma.$disconnect();


@@ -1,6 +0,0 @@
a068d62a-439b-4d70-924f-3d45ffba673b,30.04.2025 11:38:14,30.04.2025 11:38:14,31.176.221.57,BA,,1,,,,https://proto.notso.ai/jumbo/chats/a068d62a-439b-4d70-924f-3d45ffba673b.txt,2.051,6470,0.0009,,test
284c6849-51ba-41b8-8afd-c1a70e7bd997,30.04.2025 11:41:48,30.04.2025 11:41:48,31.176.221.57,BA,english,1,happy,no,no,https://proto.notso.ai/jumbo/chats/284c6849-51ba-41b8-8afd-c1a70e7bd997.txt,3.977,6537,0.0010,Greeting,Good day
ef6b43f6-e46f-4d6c-9bf7-3d8f3b658d40,01.05.2025 12:11:18,01.05.2025 12:14:53,31.176.221.57,BA,Dutch,8,excited,no,no,https://proto.notso.ai/jumbo/chats/ef6b43f6-e46f-4d6c-9bf7-3d8f3b658d40.txt,3.458,56027,0.0083,Onboarding,whats up
e5c6d4d1-7a02-4c0e-9d93-214ea06d6764,01.05.2025 12:37:43,01.05.2025 12:37:43,31.176.221.57,BA,turkish,1,happy,no,no,https://proto.notso.ai/jumbo/chats/e5c6d4d1-7a02-4c0e-9d93-214ea06d6764.txt,3.004,6549,0.0010,Language inquiry,Spreek je ook turks?
461086bd-bac0-496b-a541-d76468b96f44,01.05.2025 12:48:13,01.05.2025 12:48:21,31.176.221.57,BA,dutch,2,happy,no,no,https://proto.notso.ai/jumbo/chats/461086bd-bac0-496b-a541-d76468b96f44.txt,2.442,13220,0.0020,General,Lalalaposie
689ae197-2005-4f09-b993-d9f6fa16fc1f,01.05.2025 12:52:07,01.05.2025 12:57:14,31.176.221.57,BA,,3,,,,https://proto.notso.ai/jumbo/chats/689ae197-2005-4f09-b993-d9f6fa16fc1f.txt,1.487,19751,0.0029,,hi liza


@@ -0,0 +1,64 @@
// Check current database status
import { PrismaClient } from '@prisma/client';
const prisma = new PrismaClient();
async function checkDatabaseStatus() {
try {
console.log('📊 Checking database status...\n');
// Count total sessions
const totalSessions = await prisma.session.count();
console.log(`📈 Total sessions: ${totalSessions}`);
// Count processed vs unprocessed
const processedSessions = await prisma.session.count({
where: { processed: true }
});
const unprocessedSessions = await prisma.session.count({
where: { processed: false }
});
console.log(`✅ Processed sessions: ${processedSessions}`);
console.log(`⏳ Unprocessed sessions: ${unprocessedSessions}`);
// Count valid vs invalid data
const validSessions = await prisma.session.count({
where: { validData: true }
});
const invalidSessions = await prisma.session.count({
where: { validData: false }
});
console.log(`🎯 Valid data sessions: ${validSessions}`);
console.log(`❌ Invalid data sessions: ${invalidSessions}`);
// Count sessions with messages
const sessionsWithMessages = await prisma.session.count({
where: {
messages: {
some: {}
}
}
});
console.log(`💬 Sessions with messages: ${sessionsWithMessages}`);
// Count companies
const totalCompanies = await prisma.company.count();
console.log(`🏢 Total companies: ${totalCompanies}`);
if (totalSessions === 0) {
console.log('\n💡 No sessions found. Run CSV refresh to import data:');
console.log(' curl -X POST http://localhost:3000/api/admin/refresh-sessions');
}
} catch (error) {
console.error('❌ Error checking database status:', error);
} finally {
await prisma.$disconnect();
}
}
// Run the script
checkDatabaseStatus();


@@ -0,0 +1,69 @@
// Check why questions aren't being extracted properly
import { PrismaClient } from '@prisma/client';
const prisma = new PrismaClient();
async function checkQuestionsIssue() {
console.log('🔍 INVESTIGATING QUESTIONS EXTRACTION ISSUE\n');
// Find a session with questions stored
const sessionWithQuestions = await prisma.session.findFirst({
where: {
processed: true,
questions: { not: null }
},
include: { messages: true }
});
if (sessionWithQuestions) {
console.log('📋 SAMPLE SESSION WITH QUESTIONS:');
console.log('Session ID:', sessionWithQuestions.id);
console.log('Questions stored:', sessionWithQuestions.questions);
console.log('Summary:', sessionWithQuestions.summary);
console.log('Messages count:', sessionWithQuestions.messages.length);
console.log('\n💬 FIRST FEW MESSAGES:');
sessionWithQuestions.messages.slice(0, 8).forEach((msg, i) => {
console.log(` ${i+1}. [${msg.role}]: ${msg.content.substring(0, 150)}...`);
});
}
// Check sessions marked as invalid data
const invalidSessions = await prisma.session.count({
where: {
processed: true,
questions: '[]' // Empty questions array
}
});
console.log(`\n⚠️ SESSIONS WITH EMPTY QUESTIONS: ${invalidSessions}`);
// Find a session with empty questions to analyze
const emptyQuestionSession = await prisma.session.findFirst({
where: {
processed: true,
questions: '[]'
},
include: { messages: true }
});
if (emptyQuestionSession) {
console.log('\n❌ SAMPLE SESSION WITH EMPTY QUESTIONS:');
console.log('Session ID:', emptyQuestionSession.id);
console.log('Questions stored:', emptyQuestionSession.questions);
console.log('Summary:', emptyQuestionSession.summary);
console.log('Messages count:', emptyQuestionSession.messages.length);
console.log('\n💬 MESSAGES FROM EMPTY QUESTION SESSION:');
emptyQuestionSession.messages.slice(0, 8).forEach((msg, i) => {
console.log(` ${i+1}. [${msg.role}]: ${msg.content.substring(0, 150)}...`);
});
}
console.log('\n🤖 CURRENT OPENAI MODEL: gpt-4-turbo');
console.log('🎯 PROMPT INSTRUCTION: "Max 5 user questions in English"');
await prisma.$disconnect();
}
checkQuestionsIssue();


@@ -0,0 +1,76 @@
// Script to check what's in the transcript files
// Usage: node scripts/check-transcript-content.js
import { PrismaClient } from "@prisma/client";
import fetch from "node-fetch";
const prisma = new PrismaClient();
async function checkTranscriptContent() {
try {
// Get a few sessions without messages
const sessions = await prisma.session.findMany({
where: {
AND: [{ fullTranscriptUrl: { not: null } }, { messages: { none: {} } }],
},
include: { company: true },
take: 3,
});
for (const session of sessions) {
console.log(`\n📄 Checking session ${session.id}:`);
console.log(` URL: ${session.fullTranscriptUrl}`);
try {
const authHeader =
session.company.csvUsername && session.company.csvPassword
? "Basic " +
Buffer.from(
`${session.company.csvUsername}:${session.company.csvPassword}`
).toString("base64")
: undefined;
const response = await fetch(session.fullTranscriptUrl, {
headers: authHeader ? { Authorization: authHeader } : {},
timeout: 10000,
});
if (!response.ok) {
console.log(` ❌ HTTP ${response.status}: ${response.statusText}`);
continue;
}
const content = await response.text();
console.log(` 📏 Content length: ${content.length} characters`);
if (content.length === 0) {
console.log(` ⚠️ Empty file`);
} else if (content.length < 100) {
console.log(` 📝 Full content: "${content}"`);
} else {
console.log(
` 📝 First 200 chars: "${content.substring(0, 200)}..."`
);
}
// Check if it matches our expected format
const lines = content.split("\n").filter((line) => line.trim());
const formatMatches = lines.filter((line) =>
line.match(/^\[([^\]]+)\]\s*([^:]+):\s*(.+)$/)
);
console.log(
` 🔍 Lines total: ${lines.length}, Format matches: ${formatMatches.length}`
);
} catch (error) {
console.log(` ❌ Error: ${error.message}`);
}
}
} catch (error) {
console.error("❌ Error:", error);
} finally {
await prisma.$disconnect();
}
}
checkTranscriptContent();
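
Note: the format check above expects transcript lines shaped like "[timestamp] Role: content". A minimal sketch of parsing a single line with the same pattern; the sample line is invented for illustration:

// Same pattern the check script uses to validate transcript lines.
const LINE_PATTERN = /^\[([^\]]+)\]\s*([^:]+):\s*(.+)$/;

interface ParsedLine {
  timestamp: string;
  role: string;
  content: string;
}

function parseTranscriptLine(line: string): ParsedLine | null {
  const match = line.match(LINE_PATTERN);
  if (!match) return null;
  return { timestamp: match[1], role: match[2].trim(), content: match[3] };
}

// Hypothetical line in the expected format:
parseTranscriptLine("[30.04.2025 11:38:14] User: Good day");
// -> { timestamp: "30.04.2025 11:38:14", role: "User", content: "Good day" }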


@@ -0,0 +1,34 @@
// Check sessions for transcript URLs
import { PrismaClient } from "@prisma/client";
const prisma = new PrismaClient();
async function checkTranscriptUrls() {
const sessions = await prisma.session.findMany({
where: {
messages: { none: {} },
},
select: {
id: true,
fullTranscriptUrl: true,
}
});
const withUrl = sessions.filter(s => s.fullTranscriptUrl);
const withoutUrl = sessions.filter(s => !s.fullTranscriptUrl);
console.log(`\n📊 Transcript URL Status for Sessions without Messages:`);
console.log(`✅ Sessions with transcript URL: ${withUrl.length}`);
console.log(`❌ Sessions without transcript URL: ${withoutUrl.length}`);
if (withUrl.length > 0) {
console.log(`\n🔍 Sample URLs:`);
withUrl.slice(0, 3).forEach(s => {
console.log(` ${s.id}: ${s.fullTranscriptUrl}`);
});
}
await prisma.$disconnect();
}
checkTranscriptUrls();

Some files were not shown because too many files have changed in this diff.