diff --git a/.dockerignore b/.dockerignore index 51f9b1b99d..a51a6b549d 100644 --- a/.dockerignore +++ b/.dockerignore @@ -1,6 +1,6 @@ # Dependencies node_modules -*/node_modules +**/node_modules npm-debug.log* yarn-debug.log* yarn-error.log* diff --git a/apps/api/Dockerfile.multistage b/apps/api/Dockerfile.multistage index 67611dd2b7..f0cd5c24be 100644 --- a/apps/api/Dockerfile.multistage +++ b/apps/api/Dockerfile.multistage @@ -1,5 +1,5 @@ # ============================================================================= -# STAGE 1: Dependencies - Install workspace dependencies +# STAGE 1: Dependencies - Install only what the API needs # ============================================================================= FROM oven/bun:1.2.8 AS deps @@ -8,7 +8,16 @@ WORKDIR /app # Copy root workspace config COPY package.json bun.lock ./ -# Copy all workspace package.json files +# Strip root package.json to only keep workspaces config. +# The root has frontend deps (design-system, react-dnd, sharp, semantic-release, etc.) +# that the API doesn't need. Removing them cuts ~800 packages from the install. +RUN cat package.json | bun -e " \ + const pkg = JSON.parse(await Bun.stdin.text()); \ + delete pkg.dependencies; delete pkg.devDependencies; delete pkg.scripts; \ + console.log(JSON.stringify(pkg, null, 2));" > package.min.json \ + && mv package.min.json package.json + +# Copy only the workspace package.json files the API depends on COPY packages/auth/package.json ./packages/auth/ COPY packages/db/package.json ./packages/db/ COPY packages/utils/package.json ./packages/utils/ @@ -20,16 +29,23 @@ COPY packages/company/package.json ./packages/company/ # Copy API package.json COPY apps/api/package.json ./apps/api/ -# Install all dependencies (including workspace deps) -RUN bun install +# Install dependencies — skip lifecycle scripts (husky, etc. 
not needed in Docker) +RUN bun install --ignore-scripts # ============================================================================= # STAGE 2: Builder - Build workspace packages and NestJS app # ============================================================================= -FROM deps AS builder +FROM oven/bun:1.2.8 AS builder WORKDIR /app +# Copy node_modules first (from deps stage), then source on top. +# This avoids conflicts between workspace symlinks and local node_modules +# that get included from the build context. +COPY --from=deps /app/node_modules ./node_modules +COPY --from=deps /app/package.json ./package.json +COPY --from=deps /app/bun.lock ./bun.lock + # Copy workspace packages source COPY packages/auth ./packages/auth COPY packages/db ./packages/db @@ -42,66 +58,59 @@ COPY packages/company ./packages/company # Copy API source COPY apps/api ./apps/api -# Bring in node_modules from deps stage -COPY --from=deps /app/node_modules ./node_modules - -# Build workspace packages -RUN cd packages/auth && bun run build && cd ../.. -RUN cd packages/db && bun run build && cd ../.. -RUN cd packages/integration-platform && bun run build && cd ../.. -RUN cd packages/email && bun run build && cd ../.. -RUN cd packages/company && bun run build && cd ../.. +# Build db first — generates Prisma client needed by other packages +RUN cd packages/db && bun run build -# Generate Prisma client for API (copy schema and generate) -RUN cd packages/db && node scripts/combine-schemas.js && cd ../.. 
-RUN cp packages/db/dist/schema.prisma apps/api/prisma/schema.prisma -RUN cd apps/api && bunx prisma generate +# Build remaining workspace packages +RUN cd packages/auth && bun run build \ + && cd ../integration-platform && bun run build \ + && cd ../email && bun run build \ + && cd ../company && bun run build -# Build NestJS application (skip prebuild since we already generated Prisma) -RUN cd apps/api && bunx nest build +# Generate Prisma schema for API and build NestJS app +RUN cd packages/db && node scripts/combine-schemas.js \ + && cp /app/packages/db/dist/schema.prisma /app/apps/api/prisma/schema.prisma \ + && cd /app/apps/api && bunx prisma generate && bunx nest build # ============================================================================= # STAGE 3: Production Runtime # ============================================================================= FROM node:20-slim AS production +# Create non-root user before copying files so COPY --chown can use it +RUN groupadd --system nestjs && useradd --system --gid nestjs --create-home nestjs + WORKDIR /app +RUN chown nestjs:nestjs /app # Install runtime dependencies RUN apt-get update && apt-get install -y --no-install-recommends wget openssl && rm -rf /var/lib/apt/lists/* # Copy built NestJS app -COPY --from=builder /app/apps/api/dist ./dist +COPY --from=builder --chown=nestjs:nestjs /app/apps/api/dist ./dist -# Copy prisma files -COPY --from=builder /app/apps/api/prisma ./prisma +# Copy prisma schema (for reference only — client is already generated in node_modules) +COPY --from=builder --chown=nestjs:nestjs /app/apps/api/prisma ./prisma # Copy package.json (for any runtime needs) -COPY --from=builder /app/apps/api/package.json ./package.json +COPY --from=builder --chown=nestjs:nestjs /app/apps/api/package.json ./package.json # Copy workspace packages that are referenced by node_modules symlinks -COPY --from=builder /app/packages/auth ./packages/auth -COPY --from=builder /app/packages/db ./packages/db -COPY 
--from=builder /app/packages/utils ./packages/utils -COPY --from=builder /app/packages/integration-platform ./packages/integration-platform -COPY --from=builder /app/packages/tsconfig ./packages/tsconfig -COPY --from=builder /app/packages/email ./packages/email -COPY --from=builder /app/packages/company ./packages/company +COPY --from=builder --chown=nestjs:nestjs /app/packages/auth ./packages/auth +COPY --from=builder --chown=nestjs:nestjs /app/packages/db ./packages/db +COPY --from=builder --chown=nestjs:nestjs /app/packages/utils ./packages/utils +COPY --from=builder --chown=nestjs:nestjs /app/packages/integration-platform ./packages/integration-platform +COPY --from=builder --chown=nestjs:nestjs /app/packages/tsconfig ./packages/tsconfig +COPY --from=builder --chown=nestjs:nestjs /app/packages/email ./packages/email +COPY --from=builder --chown=nestjs:nestjs /app/packages/company ./packages/company -# Copy production node_modules (includes symlinks to workspace packages above) -COPY --from=builder /app/node_modules ./node_modules +# Copy production node_modules (includes Prisma client already generated for linux/amd64) +COPY --from=builder --chown=nestjs:nestjs /app/node_modules ./node_modules # Set production environment ENV NODE_ENV=production ENV PORT=3333 -# Regenerate Prisma client for this runtime environment -RUN npx prisma generate --schema=./prisma/schema.prisma - -# Create non-root user -RUN groupadd --system nestjs && useradd --system --gid nestjs nestjs \ - && chown -R nestjs:nestjs /app - USER nestjs EXPOSE 3333 diff --git a/apps/api/buildspec.multistage.yml b/apps/api/buildspec.multistage.yml index 4671a8e1ac..c9d5a32671 100644 --- a/apps/api/buildspec.multistage.yml +++ b/apps/api/buildspec.multistage.yml @@ -1,7 +1,7 @@ version: 0.2 -# Simplified buildspec that uses multi stage Docker build -# al building happens inside Docker - CodeBuild just orchestrates ECR/ECS +# Simplified buildspec that uses multi-stage Docker build. 
+# All building happens inside Docker — CodeBuild just orchestrates ECR/ECS. phases: pre_build: @@ -10,12 +10,21 @@ phases: - aws ecr get-login-password --region $AWS_DEFAULT_REGION | docker login --username AWS --password-stdin $AWS_ACCOUNT_ID.dkr.ecr.$AWS_DEFAULT_REGION.amazonaws.com - COMMIT_HASH=$(echo $CODEBUILD_RESOLVED_SOURCE_VERSION | cut -c 1-7) - IMAGE_TAG=${COMMIT_HASH:=latest} + # Pull latest image for Docker layer cache (ignore failure on first build) + - docker pull $ECR_REPOSITORY_URI:latest || true build: commands: - echo "Building Docker image with multi-stage build..." - cd apps/api - - docker build --build-arg BUILDKIT_INLINE_CACHE=1 --target production -f Dockerfile.multistage -t $ECR_REPOSITORY_URI:$IMAGE_TAG ../.. + - >- + docker build + --build-arg BUILDKIT_INLINE_CACHE=1 + --cache-from $ECR_REPOSITORY_URI:latest + --target production + -f Dockerfile.multistage + -t $ECR_REPOSITORY_URI:$IMAGE_TAG + ../.. - docker tag $ECR_REPOSITORY_URI:$IMAGE_TAG $ECR_REPOSITORY_URI:latest post_build: @@ -27,12 +36,7 @@ phases: - aws ecs update-service --cluster $ECS_CLUSTER_NAME --service $ECS_SERVICE_NAME --force-new-deployment - 'printf "[{\"name\":\"%s-container\",\"imageUri\":\"%s\"}]" api $ECR_REPOSITORY_URI:$IMAGE_TAG > imagedefinitions.json' -cache: - paths: - - '/root/.docker/buildx/cache/**/*' - artifacts: files: - imagedefinitions.json name: ${APP_NAME}-build - diff --git a/apps/api/package.json b/apps/api/package.json index a04bbff15e..113156c30b 100644 --- a/apps/api/package.json +++ b/apps/api/package.json @@ -49,7 +49,7 @@ "express": "^4.21.2", "helmet": "^8.1.0", "jose": "^6.0.12", - "jspdf": "^3.0.3", + "jspdf": "^4.2.0", "mammoth": "^1.8.0", "nanoid": "^5.1.6", "pdf-lib": "^1.17.1", @@ -63,7 +63,6 @@ "safe-stable-stringify": "^2.5.0", "stripe": "^20.4.0", "swagger-ui-express": "^5.0.1", - "xlsx": "^0.18.5", "zod": "^4.0.14" }, "devDependencies": { diff --git a/apps/api/src/organization/organization.service.ts 
b/apps/api/src/organization/organization.service.ts index 25255acf2b..5be0bf2eac 100644 --- a/apps/api/src/organization/organization.service.ts +++ b/apps/api/src/organization/organization.service.ts @@ -349,17 +349,17 @@ export class OrganizationService { }, employee: { policyNotifications: true, - taskReminders: true, - taskAssignments: true, - taskMentions: true, - weeklyTaskDigest: true, + taskReminders: false, + taskAssignments: false, + taskMentions: false, + weeklyTaskDigest: false, findingNotifications: false, }, contractor: { policyNotifications: true, - taskReminders: true, - taskAssignments: true, - taskMentions: true, + taskReminders: false, + taskAssignments: false, + taskMentions: false, weeklyTaskDigest: false, findingNotifications: false, }, diff --git a/apps/api/src/questionnaire/questionnaire.service.spec.ts b/apps/api/src/questionnaire/questionnaire.service.spec.ts index d5d103da9b..150d6370e3 100644 --- a/apps/api/src/questionnaire/questionnaire.service.spec.ts +++ b/apps/api/src/questionnaire/questionnaire.service.spec.ts @@ -425,7 +425,7 @@ describe('QuestionnaireService', () => { mimeType: 'text/csv', filename: 'test.csv', }; - (generateExportFile as jest.Mock).mockReturnValue(mockExport); + (generateExportFile as jest.Mock).mockResolvedValue(mockExport); const result = await service.exportById({ questionnaireId: 'q1', diff --git a/apps/api/src/questionnaire/questionnaire.service.ts b/apps/api/src/questionnaire/questionnaire.service.ts index 5ffaf07196..71bda767fb 100644 --- a/apps/api/src/questionnaire/questionnaire.service.ts +++ b/apps/api/src/questionnaire/questionnaire.service.ts @@ -148,7 +148,7 @@ export class QuestionnaireService { const zip = new AdmZip(); for (const format of formats) { - const exportFile = generateExportFile( + const exportFile = await generateExportFile( answered.map((a) => ({ question: a.question, answer: a.answer })), format, vendorName, @@ -182,7 +182,7 @@ export class QuestionnaireService { } // Single format 
export (default behavior) - const exportFile = generateExportFile( + const exportFile = await generateExportFile( answered.map((a) => ({ question: a.question, answer: a.answer })), dto.format as ExportFormat, vendorName, @@ -433,7 +433,7 @@ export class QuestionnaireService { format: dto.format, }); - return generateExportFile( + return await generateExportFile( questionsAndAnswers, dto.format as ExportFormat, questionnaire.filename, diff --git a/apps/api/src/questionnaire/utils/content-extractor.spec.ts b/apps/api/src/questionnaire/utils/content-extractor.spec.ts new file mode 100644 index 0000000000..5326df8437 --- /dev/null +++ b/apps/api/src/questionnaire/utils/content-extractor.spec.ts @@ -0,0 +1,100 @@ +import { extractContentFromFile } from './content-extractor'; +import ExcelJS from 'exceljs'; + +// Mock AI dependencies +jest.mock('@ai-sdk/openai', () => ({ openai: jest.fn() })); +jest.mock('@ai-sdk/anthropic', () => ({ anthropic: jest.fn() })); +jest.mock('@ai-sdk/groq', () => ({ createGroq: jest.fn(() => jest.fn()) })); +jest.mock('ai', () => ({ + generateText: jest.fn(), + generateObject: jest.fn(), + jsonSchema: jest.fn((s) => s), +})); + +async function createTestExcelBuffer( + sheets: { name: string; rows: (string | number)[][] }[], +): Promise<Buffer> { + const workbook = new ExcelJS.Workbook(); + for (const sheet of sheets) { + const ws = workbook.addWorksheet(sheet.name); + for (const row of sheet.rows) { + ws.addRow(row); + } + } + const arrayBuffer = await workbook.xlsx.writeBuffer(); + return Buffer.from(arrayBuffer); +} + +describe('content-extractor: extractContentFromFile', () => { + const XLSX_MIME = + 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'; + + it('should extract content from an Excel file with headers', async () => { + const buffer = await createTestExcelBuffer([ + { + name: 'Survey', + rows: [ + ['Question', 'Response', 'Comment'], + ['Do you agree?', 'Yes', 'Fully agree'], + ['Rating?', '5', ''], + ], + }, + ]); + +
const base64 = buffer.toString('base64'); + const result = await extractContentFromFile(base64, XLSX_MIME); + + expect(result).toContain('Question'); + expect(result).toContain('Do you agree?'); + expect(result).toContain('Yes'); + expect(result).toContain('Rating?'); + }); + + it('should extract content from multiple sheets', async () => { + const buffer = await createTestExcelBuffer([ + { name: 'General', rows: [['Info', 'Details'], ['Name', 'Acme Corp']] }, + { name: 'Security', rows: [['Control', 'Status'], ['MFA', 'Enabled']] }, + ]); + + const base64 = buffer.toString('base64'); + const result = await extractContentFromFile(base64, XLSX_MIME); + + expect(result).toContain('Acme Corp'); + expect(result).toContain('MFA'); + }); + + it('should handle CSV files', async () => { + const csv = 'question,answer\nWhat is 2+2?,4\n'; + const base64 = Buffer.from(csv).toString('base64'); + + const result = await extractContentFromFile(base64, 'text/csv'); + + expect(result).toContain('question,answer'); + expect(result).toContain('What is 2+2?,4'); + }); + + it('should handle plain text files', async () => { + const text = 'Some compliance document content'; + const base64 = Buffer.from(text).toString('base64'); + + const result = await extractContentFromFile(base64, 'text/plain'); + + expect(result).toBe(text); + }); + + it('should throw for Word documents', async () => { + const base64 = Buffer.from('fake').toString('base64'); + + await expect( + extractContentFromFile(base64, 'application/msword'), + ).rejects.toThrow('Word documents'); + }); + + it('should throw for unsupported types', async () => { + const base64 = Buffer.from('data').toString('base64'); + + await expect( + extractContentFromFile(base64, 'application/octet-stream'), + ).rejects.toThrow('Unsupported file type'); + }); +}); diff --git a/apps/api/src/questionnaire/utils/content-extractor.ts b/apps/api/src/questionnaire/utils/content-extractor.ts index ef8e61054c..5608801d20 100644 --- 
a/apps/api/src/questionnaire/utils/content-extractor.ts +++ b/apps/api/src/questionnaire/utils/content-extractor.ts @@ -2,10 +2,20 @@ import { openai } from '@ai-sdk/openai'; import { anthropic } from '@ai-sdk/anthropic'; import { createGroq } from '@ai-sdk/groq'; import { generateText, generateObject, jsonSchema } from 'ai'; -import * as XLSX from 'xlsx'; +import ExcelJS from 'exceljs'; import AdmZip from 'adm-zip'; import { PARSING_MODEL, VISION_EXTRACTION_PROMPT } from './constants'; +/** + * Loads an Excel workbook from a buffer. + */ +async function loadWorkbook(data: Uint8Array): Promise<ExcelJS.Workbook> { + const workbook = new ExcelJS.Workbook(); + type LoadFn = (data: Uint8Array) => Promise<ExcelJS.Workbook>; + await (workbook.xlsx.load as unknown as LoadFn)(data); + return workbook; +} + // Initialize Groq - ultra fast inference const groq = createGroq(); @@ -104,7 +114,7 @@ export async function extractQuestionsWithAI( // For Excel files - use simple library extraction then AI parsing if (isExcelFile(fileType)) { const fileBuffer = Buffer.from(fileData, 'base64'); - const rawContent = extractExcelRawContent(fileBuffer, logger); + const rawContent = await extractExcelRawContent(fileBuffer, logger); logger.info('Extracted raw Excel content', { contentLength: rawContent.length, @@ -141,19 +151,19 @@ export async function extractQuestionsWithAI( * Simple raw content extraction - just dump all cell values * No smart header detection, let AI figure it out */ -function extractExcelRawContent( +async function extractExcelRawContent( fileBuffer: Buffer, logger: ContentExtractionLogger, -): string { +): Promise<string> { // Try custom XML parser first (handles rich text) try { const zip = new AdmZip(fileBuffer); const sharedStrings = extractSharedStrings(fileBuffer); - const workbook = XLSX.read(fileBuffer, { type: 'buffer' }); + const sheetNames = extractSheetNames(zip); const allContent: string[] = []; - for (let sheetIdx = 0; sheetIdx < workbook.SheetNames.length; sheetIdx++) { - const sheetName = 
workbook.SheetNames[sheetIdx]; + for (let sheetIdx = 0; sheetIdx < sheetNames.length; sheetIdx++) { + const sheetName = sheetNames[sheetIdx]; const rows = extractSheetData(zip, sheetIdx, sharedStrings); if (rows.length === 0) continue; @@ -180,26 +190,23 @@ function extractExcelRawContent( }); } - // Fallback to standard xlsx library - const workbook = XLSX.read(fileBuffer, { type: 'buffer' }); + // Fallback to exceljs library + const workbook = await loadWorkbook(fileBuffer); const allContent: string[] = []; - for (const sheetName of workbook.SheetNames) { - const worksheet = workbook.Sheets[sheetName]; - const data = XLSX.utils.sheet_to_json(worksheet, { - header: 1, - defval: '', - }); - - allContent.push(`\n--- ${sheetName} ---`); + for (const worksheet of workbook.worksheets) { + allContent.push(`\n--- ${worksheet.name} ---`); - for (const row of data) { - const cells = row as unknown[]; - const nonEmpty = cells.map((c) => String(c).trim()).filter((c) => c); + worksheet.eachRow((row) => { + const cells = row.values as unknown[]; + const nonEmpty = cells + .slice(1) + .map((c) => String(c ?? 
'').trim()) + .filter((c) => c); if (nonEmpty.length > 0) { allContent.push(nonEmpty.join(' | ')); } - } + }); } return allContent.join('\n'); @@ -534,6 +541,29 @@ function isImageOrPdf(fileType: string): boolean { return fileType.startsWith('image/') || fileType === 'application/pdf'; } +/** + * Extracts sheet names from the workbook XML inside an xlsx zip archive + */ +function extractSheetNames(zip: AdmZip): string[] { + try { + const workbookEntry = zip.getEntry('xl/workbook.xml'); + if (!workbookEntry) return []; + + const content = workbookEntry.getData().toString('utf8'); + const names: string[] = []; + const sheetPattern = /<sheet[^>]+name="([^"]*)"[^>]*\/>/g; + let m; + + while ((m = sheetPattern.exec(content)) !== null) { + names.push(m[1]); + } + + return names; + } catch { + return []; + } +} + /** * Extracts shared strings from Excel file, handling rich text with namespace prefixes * Some Excel files use <x:t> instead of <t> for rich text, which standard libraries miss @@ -647,20 +677,24 @@ function extractSheetData( } /** - * Fallback extraction using standard xlsx library (for simple Excel files) + * Fallback extraction using exceljs library (for simple Excel files) */ -function extractFromExcelStandard( +async function extractFromExcelStandard( fileBuffer: Buffer, - logger: ContentExtractionLogger, -): string { - const workbook = XLSX.read(fileBuffer, { type: 'buffer' }); + _logger: ContentExtractionLogger, +): Promise<string> { + const workbook = await loadWorkbook(fileBuffer); const sheets: string[] = []; - for (const sheetName of workbook.SheetNames) { - const worksheet = workbook.Sheets[sheetName]; - const jsonData = XLSX.utils.sheet_to_json(worksheet, { - header: 1, - defval: '', + for (const worksheet of workbook.worksheets) { + const jsonData: string[][] = []; + worksheet.eachRow((row) => { + const cells = row.values as unknown[]; + jsonData.push( + cells.slice(1).map((cell) => + cell !== null && cell !== undefined ? 
String(cell).trim() : '', + ), + ); }); if (jsonData.length === 0) continue; @@ -672,8 +706,7 @@ // Find header row for (let i = 0; i < Math.min(10, jsonData.length); i++) { const row = jsonData[i]; - if (!Array.isArray(row)) continue; - const rowLower = row.map((cell) => String(cell).toLowerCase().trim()); + const rowLower = row.map((cell) => cell.toLowerCase()); const headerKeywords = [ 'question', 'response', @@ -687,19 +720,14 @@ if (matchCount >= 2) { headerRowIndex = i; - columnHeaders = row.map((cell) => String(cell).trim()); + columnHeaders = row; break; } } // Process rows for (let i = 0; i < jsonData.length; i++) { - const row = jsonData[i]; - if (!Array.isArray(row)) continue; - - const cells = row.map((cell) => - cell !== null && cell !== undefined ? String(cell).trim() : '', - ); + const cells = jsonData[i]; const hasContent = cells.some((cell) => cell !== ''); if (!hasContent) continue; @@ -729,7 +757,9 @@ } if (formattedRows.length > 0) { - sheets.push(`=== Sheet: ${sheetName} ===\n${formattedRows.join('\n')}`); + sheets.push( + `=== Sheet: ${worksheet.name} ===\n${formattedRows.join('\n')}`, + ); } } @@ -737,11 +767,11 @@ } // Content extraction functions -function extractFromExcel( +async function extractFromExcel( fileBuffer: Buffer, fileType: string, logger: ContentExtractionLogger, -): string { +): Promise<string> { const excelStartTime = Date.now(); const fileSizeMB = (fileBuffer.length / (1024 * 1024)).toFixed(2); @@ -758,11 +788,11 @@ count: sharedStrings.length, }); - const workbook = XLSX.read(fileBuffer, { type: 'buffer' }); + const sheetNames = extractSheetNames(zip); const sheets: string[] = []; - for (let sheetIdx = 0; sheetIdx < workbook.SheetNames.length; sheetIdx++) { - const sheetName = workbook.SheetNames[sheetIdx]; + for (let sheetIdx = 0; sheetIdx < sheetNames.length; 
sheetIdx++) { + const sheetName = sheetNames[sheetIdx]; const rows = extractSheetData(zip, sheetIdx, sharedStrings); if (rows.length === 0) continue; @@ -835,14 +865,14 @@ logger.info( 'Custom parser returned minimal content, trying standard library', ); - result = extractFromExcelStandard(fileBuffer, logger); + result = await extractFromExcelStandard(fileBuffer, logger); } } catch (error) { - // Fallback to standard xlsx library if custom parser fails + // Fallback to exceljs library if custom parser fails logger.warn('Custom Excel parser failed, using standard library', { error: error instanceof Error ? error.message : 'Unknown error', }); - result = extractFromExcelStandard(fileBuffer, logger); + result = await extractFromExcelStandard(fileBuffer, logger); } const extractionTime = ((Date.now() - excelStartTime) / 1000).toFixed(2); diff --git a/apps/api/src/questionnaire/utils/export-generator.spec.ts b/apps/api/src/questionnaire/utils/export-generator.spec.ts new file mode 100644 index 0000000000..224d27b51f --- /dev/null +++ b/apps/api/src/questionnaire/utils/export-generator.spec.ts @@ -0,0 +1,139 @@ +import { + generateExportFile, + generateXLSX, + generateCSV, + generatePDF, +} from './export-generator'; +import ExcelJS from 'exceljs'; +import type { QuestionAnswer } from './question-parser'; + +const sampleQAs: QuestionAnswer[] = [ + { question: 'Do you have MFA?', answer: 'Yes' }, + { question: 'Describe your backup strategy', answer: null }, + { question: 'SOC 2 compliant?', answer: 'In progress' }, +]; + +describe('generateXLSX', () => { + it('should produce a valid XLSX buffer', async () => { + const buffer = await generateXLSX(sampleQAs); + + expect(Buffer.isBuffer(buffer)).toBe(true); + expect(buffer.length).toBeGreaterThan(0); + + // Verify by reading back with ExcelJS + const workbook = new ExcelJS.Workbook(); + type LoadFn = (data: Uint8Array) => Promise<ExcelJS.Workbook>; + await (workbook.xlsx.load as unknown as LoadFn)(buffer); + + 
expect(workbook.worksheets.length).toBe(1); + const ws = workbook.worksheets[0]; + expect(ws.name).toBe('Questionnaire'); + + // Header row + 3 data rows = 4 rows + expect(ws.rowCount).toBe(4); + + // Check header + const headerRow = ws.getRow(1); + expect(headerRow.getCell(1).value).toBe('#'); + expect(headerRow.getCell(2).value).toBe('Question'); + expect(headerRow.getCell(3).value).toBe('Answer'); + + // Check first data row + const dataRow = ws.getRow(2); + expect(dataRow.getCell(1).value).toBe(1); + expect(dataRow.getCell(2).value).toBe('Do you have MFA?'); + expect(dataRow.getCell(3).value).toBe('Yes'); + + // Check null answer becomes empty string + const nullAnswerRow = ws.getRow(3); + expect(nullAnswerRow.getCell(3).value).toBe(''); + }); + + it('should handle empty input', async () => { + const buffer = await generateXLSX([]); + + const workbook = new ExcelJS.Workbook(); + type LoadFn = (data: Uint8Array) => Promise<ExcelJS.Workbook>; + await (workbook.xlsx.load as unknown as LoadFn)(buffer); + const ws = workbook.worksheets[0]; + + // Only header row + expect(ws.rowCount).toBe(1); + }); +}); + +describe('generateCSV', () => { + it('should produce valid CSV with headers', () => { + const csv = generateCSV(sampleQAs); + const lines = csv.split('\n'); + + expect(lines[0]).toBe('"#","Question","Answer"'); + expect(lines[1]).toBe('"1","Do you have MFA?","Yes"'); + expect(lines[2]).toBe('"2","Describe your backup strategy",""'); + expect(lines[3]).toBe('"3","SOC 2 compliant?","In progress"'); + }); + + it('should escape double quotes in CSV', () => { + const qas: QuestionAnswer[] = [ + { question: 'Is "security" important?', answer: 'Yes, "very"' }, + ]; + const csv = generateCSV(qas); + + expect(csv).toContain('""security""'); + expect(csv).toContain('""very""'); + }); +}); + +describe('generatePDF', () => { + it('should produce a valid PDF buffer', () => { + const buffer = generatePDF(sampleQAs, 'TestVendor'); + + expect(Buffer.isBuffer(buffer)).toBe(true); + 
expect(buffer.length).toBeGreaterThan(0); + // PDF files start with %PDF + expect(buffer.toString('utf-8', 0, 4)).toBe('%PDF'); + }); + + it('should handle empty QAs', () => { + const buffer = generatePDF([], 'Empty'); + + expect(Buffer.isBuffer(buffer)).toBe(true); + expect(buffer.toString('utf-8', 0, 4)).toBe('%PDF'); + }); +}); + +describe('generateExportFile', () => { + it('should generate XLSX export with correct metadata', async () => { + const result = await generateExportFile(sampleQAs, 'xlsx', 'vendor-test.pdf'); + + expect(result.mimeType).toBe( + 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', + ); + expect(result.filename).toBe('vendor-test.xlsx'); + expect(Buffer.isBuffer(result.fileBuffer)).toBe(true); + }); + + it('should generate CSV export with correct metadata', async () => { + const result = await generateExportFile(sampleQAs, 'csv', 'vendor-test.xlsx'); + + expect(result.mimeType).toBe('text/csv'); + expect(result.filename).toBe('vendor-test.csv'); + }); + + it('should generate PDF export with correct metadata', async () => { + const result = await generateExportFile(sampleQAs, 'pdf', 'vendor-test'); + + expect(result.mimeType).toBe('application/pdf'); + expect(result.filename).toBe('vendor-test.pdf'); + }); + + it('should sanitize dangerous filename characters', async () => { + const result = await generateExportFile( + sampleQAs, + 'csv', + 'test<file>:name.xlsx', + ); + + expect(result.filename).toBe('test_file__name.csv'); + }); +}); diff --git a/apps/api/src/questionnaire/utils/export-generator.ts b/apps/api/src/questionnaire/utils/export-generator.ts index 9485cc6387..25d9fcd607 100644 --- a/apps/api/src/questionnaire/utils/export-generator.ts +++ b/apps/api/src/questionnaire/utils/export-generator.ts @@ -1,4 +1,4 @@ -import * as XLSX from 'xlsx'; +import ExcelJS from 'exceljs'; import { jsPDF } from 'jspdf'; import type { QuestionAnswer } from './question-parser'; @@ -13,11 +13,11 @@ export interface ExportResult { /** * 
Generates an export file in the specified format */ -export function generateExportFile( +export async function generateExportFile( questionsAndAnswers: QuestionAnswer[], format: ExportFormat, vendorName: string, -): ExportResult { +): Promise<ExportResult> { // Remove original extension if present and get base name const baseName = vendorName.replace(/\.[^/.]+$/, ''); // Keep the original name but sanitize only dangerous characters for filenames @@ -26,7 +26,7 @@ switch (format) { case 'xlsx': return { - fileBuffer: generateXLSX(questionsAndAnswers), + fileBuffer: await generateXLSX(questionsAndAnswers), mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet', filename: `${sanitizedBaseName}.xlsx`, @@ -52,20 +52,27 @@ /** * Generates an XLSX file buffer from questions and answers */ -export function generateXLSX(questionsAndAnswers: QuestionAnswer[]): Buffer { - const workbook = XLSX.utils.book_new(); - const worksheetData = [ - ['#', 'Question', 'Answer'], - ...questionsAndAnswers.map((qa, index) => [ - index + 1, - qa.question, - qa.answer || '', - ]), +export async function generateXLSX( + questionsAndAnswers: QuestionAnswer[], +): Promise<Buffer> { + const workbook = new ExcelJS.Workbook(); + const worksheet = workbook.addWorksheet('Questionnaire'); + + // Set column widths + worksheet.columns = [ + { header: '#', key: 'num', width: 5 }, + { header: 'Question', key: 'question', width: 60 }, + { header: 'Answer', key: 'answer', width: 60 }, ]; - const worksheet = XLSX.utils.aoa_to_sheet(worksheetData); - worksheet['!cols'] = [{ wch: 5 }, { wch: 60 }, { wch: 60 }]; - XLSX.utils.book_append_sheet(workbook, worksheet, 'Questionnaire'); - return XLSX.write(workbook, { type: 'buffer', bookType: 'xlsx' }); + + // Add data rows + for (let i = 0; i < questionsAndAnswers.length; i++) { + const qa = questionsAndAnswers[i]; + worksheet.addRow({ num: i + 1, question: qa.question, answer: qa.answer || 
'' }); + } + + const xlsxBuffer = await workbook.xlsx.writeBuffer(); + return Buffer.from(xlsxBuffer); } /** diff --git a/apps/api/src/training/training-certificate-pdf.service.spec.ts b/apps/api/src/training/training-certificate-pdf.service.spec.ts new file mode 100644 index 0000000000..09a7e12363 --- /dev/null +++ b/apps/api/src/training/training-certificate-pdf.service.spec.ts @@ -0,0 +1,67 @@ +import { TrainingCertificatePdfService } from './training-certificate-pdf.service'; + +// Mock fetch for logo download +global.fetch = jest.fn().mockResolvedValue({ + ok: false, +}) as jest.Mock; + +describe('TrainingCertificatePdfService', () => { + let service: TrainingCertificatePdfService; + + beforeEach(() => { + service = new TrainingCertificatePdfService(); + }); + + describe('generateTrainingCertificatePdf', () => { + it('returns a valid PDF buffer', async () => { + const result = await service.generateTrainingCertificatePdf({ + userName: 'Jane Doe', + organizationName: 'Acme Corp', + completedAt: new Date('2026-01-15'), + }); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + // PDF magic bytes + expect(result.subarray(0, 5).toString()).toBe('%PDF-'); + }); + + it('handles unicode characters in names', async () => { + const result = await service.generateTrainingCertificatePdf({ + userName: 'Jos\u00e9 Garc\u00eda', + organizationName: 'Caf\u00e9 Corp\u2014LLC', + completedAt: new Date('2026-03-01'), + }); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + }); + + it('includes completion date in the PDF', async () => { + const result = await service.generateTrainingCertificatePdf({ + userName: 'Test User', + organizationName: 'Test Org', + completedAt: new Date('2026-06-15'), + }); + + const pdfText = result.toString('latin1'); + expect(pdfText).toContain('June'); + expect(pdfText).toContain('2026'); + }); + + it('handles logo fetch failure gracefully', async () => { + (global.fetch as 
jest.Mock).mockRejectedValueOnce( + new Error('Network error'), + ); + + const result = await service.generateTrainingCertificatePdf({ + userName: 'Logo User', + organizationName: 'Logo Org', + completedAt: new Date('2026-01-01'), + }); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + }); + }); +}); diff --git a/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.spec.ts b/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.spec.ts new file mode 100644 index 0000000000..f3a106ab09 --- /dev/null +++ b/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.spec.ts @@ -0,0 +1,162 @@ +import { extractContentFromFile } from './extract-content-from-file'; +import ExcelJS from 'exceljs'; + +// Mock external dependencies that aren't relevant to Excel tests +jest.mock('@/vector-store/logger', () => ({ + logger: { + info: jest.fn(), + warn: jest.fn(), + error: jest.fn(), + }, +})); + +jest.mock('@ai-sdk/openai', () => ({ + openai: jest.fn(), +})); + +jest.mock('ai', () => ({ + generateText: jest.fn(), +})); + +jest.mock('mammoth', () => ({ + default: { + extractRawText: jest.fn(), + convertToHtml: jest.fn(), + }, +})); + +async function createTestExcelBuffer( + sheets: { name: string; rows: (string | number)[][] }[], +): Promise { + const workbook = new ExcelJS.Workbook(); + for (const sheet of sheets) { + const ws = workbook.addWorksheet(sheet.name); + for (const row of sheet.rows) { + ws.addRow(row); + } + } + const arrayBuffer = await workbook.xlsx.writeBuffer(); + return Buffer.from(arrayBuffer); +} + +describe('extractContentFromFile - Excel handling', () => { + const XLSX_MIME = + 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'; + + it('should extract content from a single-sheet Excel file', async () => { + const buffer = await createTestExcelBuffer([ + { + name: 'Data', + rows: [ + ['Name', 'Value'], + ['Alice', 100], + ['Bob', 200], + ], + }, + ]); + + const 
base64 = buffer.toString('base64'); + const result = await extractContentFromFile(base64, XLSX_MIME); + + expect(result).toContain('Sheet: Data'); + expect(result).toContain('Name'); + expect(result).toContain('Alice'); + expect(result).toContain('100'); + expect(result).toContain('Bob'); + expect(result).toContain('200'); + }); + + it('should extract content from multiple sheets', async () => { + const buffer = await createTestExcelBuffer([ + { name: 'Sheet1', rows: [['Hello', 'World']] }, + { name: 'Sheet2', rows: [['Foo', 'Bar']] }, + ]); + + const base64 = buffer.toString('base64'); + const result = await extractContentFromFile(base64, XLSX_MIME); + + expect(result).toContain('Sheet: Sheet1'); + expect(result).toContain('Hello'); + expect(result).toContain('Sheet: Sheet2'); + expect(result).toContain('Foo'); + }); + + it('should skip empty rows', async () => { + const buffer = await createTestExcelBuffer([ + { + name: 'Sparse', + rows: [ + ['Data1'], + [], // empty row won't be added by ExcelJS addRow with empty array + ['Data2'], + ], + }, + ]); + + const base64 = buffer.toString('base64'); + const result = await extractContentFromFile(base64, XLSX_MIME); + + expect(result).toContain('Data1'); + expect(result).toContain('Data2'); + }); + + it('should handle XLS MIME type', async () => { + const buffer = await createTestExcelBuffer([ + { name: 'Test', rows: [['Value']] }, + ]); + + const base64 = buffer.toString('base64'); + // application/vnd.ms-excel is also accepted + const result = await extractContentFromFile( + base64, + 'application/vnd.ms-excel', + ); + + expect(result).toContain('Value'); + }); + + it('should throw on corrupt Excel data', async () => { + const badData = Buffer.from('not an excel file').toString('base64'); + + await expect( + extractContentFromFile(badData, XLSX_MIME), + ).rejects.toThrow('Failed to parse Excel file'); + }); +}); + +describe('extractContentFromFile - non-Excel types', () => { + it('should handle CSV files', async () => 
{ + const csv = 'col1,col2\nval1,val2\n'; + const base64 = Buffer.from(csv).toString('base64'); + + const result = await extractContentFromFile(base64, 'text/csv'); + + expect(result).toContain('col1,col2'); + expect(result).toContain('val1,val2'); + }); + + it('should handle plain text files', async () => { + const text = 'Hello, world!'; + const base64 = Buffer.from(text).toString('base64'); + + const result = await extractContentFromFile(base64, 'text/plain'); + + expect(result).toBe(text); + }); + + it('should throw for unsupported file types', async () => { + const base64 = Buffer.from('data').toString('base64'); + + await expect( + extractContentFromFile(base64, 'application/octet-stream'), + ).rejects.toThrow('Unsupported file type'); + }); + + it('should throw for legacy .doc files', async () => { + const base64 = Buffer.from('data').toString('base64'); + + await expect( + extractContentFromFile(base64, 'application/msword'), + ).rejects.toThrow('Legacy Word documents'); + }); +}); diff --git a/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.ts b/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.ts index 6c8784a11c..e57d84e9c3 100644 --- a/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.ts +++ b/apps/api/src/trigger/vector-store/helpers/extract-content-from-file.ts @@ -1,9 +1,21 @@ import { logger } from '@/vector-store/logger'; import { openai } from '@ai-sdk/openai'; import { generateText } from 'ai'; -import * as XLSX from 'xlsx'; +import ExcelJS from 'exceljs'; import mammoth from 'mammoth'; +/** + * Loads an Excel workbook from a Uint8Array/Buffer. + * ExcelJS type declarations are incompatible with Node 22+ / TS 5.8+ Buffer types, + * so we use a typed wrapper to avoid the mismatch. 
+ */ +async function loadWorkbook(data: Uint8Array): Promise { + const workbook = new ExcelJS.Workbook(); + type LoadFn = (data: Uint8Array) => Promise; + await (workbook.xlsx.load as unknown as LoadFn)(data); + return workbook; +} + const htmlEntityMap = { ' ': ' ', '&': '&', @@ -54,42 +66,39 @@ export async function extractContentFromFile( fileSizeMB, }); - const workbook = XLSX.read(fileBuffer, { type: 'buffer' }); + const workbook = await loadWorkbook(fileBuffer); // Process sheets sequentially const sheets: string[] = []; - for (const sheetName of workbook.SheetNames) { - const worksheet = workbook.Sheets[sheetName]; - const jsonData = XLSX.utils.sheet_to_json(worksheet, { - header: 1, - defval: '', + for (const worksheet of workbook.worksheets) { + const lines: string[] = []; + + worksheet.eachRow((row) => { + const cells = row.values as unknown[]; + // ExcelJS row.values is 1-indexed (index 0 is undefined) + const filtered = cells + .slice(1) + .filter( + (cell) => cell !== null && cell !== undefined && cell !== '', + ) + .map((cell) => String(cell)); + + if (filtered.length > 0) { + lines.push(filtered.join(' | ')); + } }); - // Convert to readable text format - const sheetText = jsonData - .map((row: any) => { - if (Array.isArray(row)) { - return row - .filter( - (cell) => cell !== null && cell !== undefined && cell !== '', - ) - .join(' | '); - } - return String(row); - }) - .filter((line: string) => line.trim() !== '') - .join('\n'); - + const sheetText = lines.join('\n'); if (sheetText.trim()) { - sheets.push(`Sheet: ${sheetName}\n${sheetText}`); + sheets.push(`Sheet: ${worksheet.name}\n${sheetText}`); } } const extractionTime = ((Date.now() - excelStartTime) / 1000).toFixed(2); logger.info('Excel file processed', { fileSizeMB, - totalSheets: workbook.SheetNames.length, + totalSheets: workbook.worksheets.length, extractedLength: sheets.join('\n\n').length, extractionTimeSeconds: extractionTime, }); diff --git 
a/apps/api/src/trust-portal/policy-pdf-renderer.service.spec.ts b/apps/api/src/trust-portal/policy-pdf-renderer.service.spec.ts new file mode 100644 index 0000000000..85c55bf9a2 --- /dev/null +++ b/apps/api/src/trust-portal/policy-pdf-renderer.service.spec.ts @@ -0,0 +1,177 @@ +import { PolicyPdfRendererService } from './policy-pdf-renderer.service'; + +describe('PolicyPdfRendererService', () => { + let service: PolicyPdfRendererService; + + beforeEach(() => { + service = new PolicyPdfRendererService(); + }); + + describe('renderPoliciesPdfBuffer', () => { + it('returns a valid PDF buffer for a simple policy', () => { + const result = service.renderPoliciesPdfBuffer( + [ + { + name: 'Privacy Policy', + content: { + type: 'doc', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'We respect your privacy.' }], + }, + ], + }, + }, + ], + 'Test Org', + ); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + expect(result.subarray(0, 5).toString()).toBe('%PDF-'); + }); + + it('handles multiple policies', () => { + const policies = [ + { + name: 'Policy A', + content: { + type: 'doc', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'Content A' }], + }, + ], + }, + }, + { + name: 'Policy B', + content: { + type: 'doc', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'Content B' }], + }, + ], + }, + }, + ]; + + const result = service.renderPoliciesPdfBuffer(policies, 'Test Org'); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + }); + + it('handles empty content', () => { + const result = service.renderPoliciesPdfBuffer( + [{ name: 'Empty Policy', content: null }], + 'Test Org', + ); + + expect(result).toBeInstanceOf(Buffer); + }); + + it('handles policies without organization name', () => { + const result = service.renderPoliciesPdfBuffer([ + { + name: 'Standalone Policy', + content: { + type: 'doc', + content: [ + { + type: 
'paragraph', + content: [{ type: 'text', text: 'Standalone content.' }], + }, + ], + }, + }, + ]); + + expect(result).toBeInstanceOf(Buffer); + }); + + it('handles rich content with headings, bold, lists', () => { + const result = service.renderPoliciesPdfBuffer( + [ + { + name: 'Rich Policy', + content: { + type: 'doc', + content: [ + { + type: 'heading', + attrs: { level: 1 }, + content: [{ type: 'text', text: 'Section 1' }], + }, + { + type: 'paragraph', + content: [ + { + type: 'text', + text: 'Bold text', + marks: [{ type: 'bold' }], + }, + ], + }, + { + type: 'bulletList', + content: [ + { + type: 'listItem', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'Item 1' }], + }, + ], + }, + { + type: 'listItem', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'Item 2' }], + }, + ], + }, + ], + }, + ], + }, + }, + ], + 'Test Org', + ); + + expect(result).toBeInstanceOf(Buffer); + expect(result.length).toBeGreaterThan(0); + }); + + it('applies custom primary color', () => { + const result = service.renderPoliciesPdfBuffer( + [ + { + name: 'Branded Policy', + content: { + type: 'doc', + content: [ + { + type: 'paragraph', + content: [{ type: 'text', text: 'Content' }], + }, + ], + }, + }, + ], + 'Branded Org', + '#ff6600', + ); + + expect(result).toBeInstanceOf(Buffer); + }); + }); +}); diff --git a/apps/app/package.json b/apps/app/package.json index e2799c1119..827d19a602 100644 --- a/apps/app/package.json +++ b/apps/app/package.json @@ -95,7 +95,7 @@ "dub": "^0.66.1", "framer-motion": "^12.18.1", "geist": "^1.3.1", - "jspdf": "^3.0.2", + "jspdf": "^4.2.0", "lucide-react": "^0.577.0", "mammoth": "^1.11.0", "motion": "^12.35.0", @@ -136,7 +136,6 @@ "use-debounce": "^10.0.4", "use-long-press": "^3.3.0", "use-stick-to-bottom": "^1.1.3", - "xlsx": "^0.18.5", "xml2js": "^0.6.2", "zaraz-ts": "^1.2.0", "zod": "^4.0.0", diff --git a/apps/portal/package.json b/apps/portal/package.json index 0b71ffb67a..cc2d868cc8 100644 --- 
a/apps/portal/package.json +++ b/apps/portal/package.json @@ -23,7 +23,7 @@ "better-auth": "^1.4.5", "class-variance-authority": "^0.7.1", "geist": "^1.3.1", - "jspdf": "^3.0.3", + "jspdf": "^4.2.0", "jszip": "^3.10.1", "next": "^16.0.10", "next-safe-action": "^8.0.3", diff --git a/bun.lock b/bun.lock index 1828dfd5c2..fd32e62666 100644 --- a/bun.lock +++ b/bun.lock @@ -13,7 +13,6 @@ "cheerio": "^1.2.0", "react-syntax-highlighter": "^15.6.6", "unpdf": "^1.4.0", - "xlsx": "^0.18.5", "zod": "^4.3.6", }, "devDependencies": { @@ -115,7 +114,7 @@ "express": "^4.21.2", "helmet": "^8.1.0", "jose": "^6.0.12", - "jspdf": "^3.0.3", + "jspdf": "^4.2.0", "mammoth": "^1.8.0", "nanoid": "^5.1.6", "pdf-lib": "^1.17.1", @@ -129,7 +128,6 @@ "safe-stable-stringify": "^2.5.0", "stripe": "^20.4.0", "swagger-ui-express": "^5.0.1", - "xlsx": "^0.18.5", "zod": "^4.0.14", }, "devDependencies": { @@ -259,7 +257,7 @@ "dub": "^0.66.1", "framer-motion": "^12.18.1", "geist": "^1.3.1", - "jspdf": "^3.0.2", + "jspdf": "^4.2.0", "lucide-react": "^0.577.0", "mammoth": "^1.11.0", "motion": "^12.35.0", @@ -300,7 +298,6 @@ "use-debounce": "^10.0.4", "use-long-press": "^3.3.0", "use-stick-to-bottom": "^1.1.3", - "xlsx": "^0.18.5", "xml2js": "^0.6.2", "zaraz-ts": "^1.2.0", "zod": "^4.0.0", @@ -363,7 +360,7 @@ "better-auth": "^1.4.5", "class-variance-authority": "^0.7.1", "geist": "^1.3.1", - "jspdf": "^3.0.3", + "jspdf": "^4.2.0", "jszip": "^3.10.1", "next": "^16.0.10", "next-safe-action": "^8.0.3", @@ -2795,8 +2792,6 @@ "acorn-walk": ["acorn-walk@8.3.4", "", { "dependencies": { "acorn": "^8.11.0" } }, "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g=="], - "adler-32": ["adler-32@1.3.1", "", {}, "sha512-ynZ4w/nUUv5rrsR8UUGoe1VC9hZj6V5hU9Qw1HlMDJGEJw5S7TfTErWTjMys6M7vr0YWcPqs3qAr4ss0nDfP+A=="], - "adm-zip": ["adm-zip@0.5.16", "", {}, "sha512-TGw5yVi4saajsSEgz25grObGHEUaDrniwvA2qwSC060KfqGPdglhvPMA2lPIoxs3PQIItj2iag35fONcQqgUaQ=="], "agent-base": 
["agent-base@7.1.4", "", {}, "sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ=="], @@ -3077,8 +3072,6 @@ "ccount": ["ccount@2.0.1", "", {}, "sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg=="], - "cfb": ["cfb@1.2.2", "", { "dependencies": { "adler-32": "~1.3.0", "crc-32": "~1.2.0" } }, "sha512-KfdUZsSOw19/ObEWasvBP/Ac4reZvAGauZhs6S/gqNhXhI7cKwvlH7ulj+dOEYnca4bm4SGo8C1bTAQvnTjgQA=="], - "chai": ["chai@5.3.3", "", { "dependencies": { "assertion-error": "^2.0.1", "check-error": "^2.1.1", "deep-eql": "^5.0.1", "loupe": "^3.1.0", "pathval": "^2.0.0" } }, "sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw=="], "chainsaw": ["chainsaw@0.1.0", "", { "dependencies": { "traverse": ">=0.3.0 <0.4" } }, "sha512-75kWfWt6MEKNC8xYXIdRpDehRYY/tNSgwKaJq+dbbDcxORuVrrQ+SEHoWsniVn9XPYfP4gmdWIeDk/4YNp1rNQ=="], @@ -3167,8 +3160,6 @@ "code-point-at": ["code-point-at@1.1.0", "", {}, "sha512-RpAVKQA5T63xEj6/giIbUEtZwJ4UFIc3ZtvEkiaUERylqe8xb5IvqcgOurZLahv93CLKfxcw5YI+DZcUBRyLXA=="], - "codepage": ["codepage@1.15.0", "", {}, "sha512-3g6NUTPd/YtuuGrhMnOMRjFc+LJw/bnMp3+0r/Wcz3IXUuCosKRJvMphm5+Q+bvTVGcJJuRvVLuYba+WojaFaA=="], - "collect-v8-coverage": ["collect-v8-coverage@1.0.3", "", {}, "sha512-1L5aqIkwPfiodaMgQunkF1zRhNqifHBmtbbbxcr6yVxxBnliw4TDOW6NxpO8DJLgJ16OT+Y4ztZqP6p/FtXnAw=="], "color-convert": ["color-convert@2.0.1", "", { "dependencies": { "color-name": "~1.1.4" } }, "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ=="], @@ -3829,8 +3820,6 @@ "forwarded": ["forwarded@0.2.0", "", {}, "sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow=="], - "frac": ["frac@1.1.2", "", {}, "sha512-w/XBfkibaTl3YDqASwfDUqkna4Z2p9cFSr1aHDt0WoMTECnRfBOv2WArlZILlqgWlmdIlALXGpM2AOhEk5W3IA=="], - "fraction.js": ["fraction.js@5.3.4", "", {}, 
"sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ=="], "framer-motion": ["framer-motion@12.35.0", "", { "dependencies": { "motion-dom": "^12.35.0", "motion-utils": "^12.29.2", "tslib": "^2.4.0" }, "peerDependencies": { "@emotion/is-prop-valid": "*", "react": "^18.0.0 || ^19.0.0", "react-dom": "^18.0.0 || ^19.0.0" }, "optionalPeers": ["@emotion/is-prop-valid", "react", "react-dom"] }, "sha512-w8hghCMQ4oq10j6aZh3U2yeEQv5K69O/seDI/41PK4HtgkLrcBovUNc0ayBC3UyyU7V1mrY2yLzvYdWJX9pGZQ=="], @@ -4365,7 +4354,7 @@ "jsonwebtoken": ["jsonwebtoken@9.0.3", "", { "dependencies": { "jws": "^4.0.1", "lodash.includes": "^4.3.0", "lodash.isboolean": "^3.0.3", "lodash.isinteger": "^4.0.4", "lodash.isnumber": "^3.0.3", "lodash.isplainobject": "^4.0.6", "lodash.isstring": "^4.0.1", "lodash.once": "^4.0.0", "ms": "^2.1.1", "semver": "^7.5.4" } }, "sha512-MT/xP0CrubFRNLNKvxJ2BYfy53Zkm++5bX9dtuPbqAeQpTVe0MQTFhao8+Cp//EmJp244xt6Drw/GVEGCUj40g=="], - "jspdf": ["jspdf@3.0.4", "", { "dependencies": { "@babel/runtime": "^7.28.4", "fast-png": "^6.2.0", "fflate": "^0.8.1" }, "optionalDependencies": { "canvg": "^3.0.11", "core-js": "^3.6.0", "dompurify": "^3.2.4", "html2canvas": "^1.0.0-rc.5" } }, "sha512-dc6oQ8y37rRcHn316s4ngz/nOjayLF/FFxBF4V9zamQKRqXxyiH1zagkCdktdWhtoQId5K20xt1lB90XzkB+hQ=="], + "jspdf": ["jspdf@4.2.0", "", { "dependencies": { "@babel/runtime": "^7.28.6", "fast-png": "^6.2.0", "fflate": "^0.8.1" }, "optionalDependencies": { "canvg": "^3.0.11", "core-js": "^3.6.0", "dompurify": "^3.3.1", "html2canvas": "^1.0.0-rc.5" } }, "sha512-hR/hnRevAXXlrjeqU5oahOE+Ln9ORJUB5brLHHqH67A+RBQZuFr5GkbI9XQI8OUFSEezKegsi45QRpc4bGj75Q=="], "jsprim": ["jsprim@1.4.2", "", { "dependencies": { "assert-plus": "1.0.0", "extsprintf": "1.3.0", "json-schema": "0.4.0", "verror": "1.10.0" } }, "sha512-P2bSOMAc/ciLz6DzgjVlGJP9+BrJWu5UDGK70C2iweC5QBIeFf0ZXRvGjEj2uYgrY2MkAAhsSWHDWlFtEroZWw=="], @@ -5605,8 +5594,6 @@ "sqids": ["sqids@0.3.0", "", {}, 
"sha512-lOQK1ucVg+W6n3FhRwwSeUijxe93b51Bfz5PMRMihVf1iVkl82ePQG7V5vwrhzB11v0NtsR25PSZRGiSomJaJw=="], - "ssf": ["ssf@0.11.2", "", { "dependencies": { "frac": "~1.1.2" } }, "sha512-+idbmIXoYET47hH+d7dfm2epdOMUDjqcB4648sTZ+t2JwoyBFL/insLfB/racrDmsKB3diwsDA696pZMieAC5g=="], - "sshpk": ["sshpk@1.18.0", "", { "dependencies": { "asn1": "~0.2.3", "assert-plus": "^1.0.0", "bcrypt-pbkdf": "^1.0.0", "dashdash": "^1.12.0", "ecc-jsbn": "~0.1.1", "getpass": "^0.1.1", "jsbn": "~0.1.0", "safer-buffer": "^2.0.2", "tweetnacl": "~0.14.0" }, "bin": { "sshpk-conv": "bin/sshpk-conv", "sshpk-sign": "bin/sshpk-sign", "sshpk-verify": "bin/sshpk-verify" } }, "sha512-2p2KJZTSqQ/I3+HX42EpYOa2l3f8Erv8MWKsy2I9uf4wA7yFIkXRffYdsx86y6z4vHtV8u7g+pPlr8/4ouAxsQ=="], "ssri": ["ssri@9.0.1", "", { "dependencies": { "minipass": "^3.1.1" } }, "sha512-o57Wcn66jMQvfHG1FlYbWeZWW/dHZhJXjpIcTfXldXEk5nz5lStPo3mK0OJQfGR3RbZUlbISexbljkJzuEj/8Q=="], @@ -6173,10 +6160,6 @@ "wide-align": ["wide-align@1.1.5", "", { "dependencies": { "string-width": "^1.0.2 || 2 || 3 || 4" } }, "sha512-eDMORYaPNZ4sQIuuYPDHdQvf4gyCF9rEEV/yPxGfwPkRodwEgiMUUXTx/dex+Me0wxx53S+NgUHaP7y3MGlDmg=="], - "wmf": ["wmf@1.0.2", "", {}, "sha512-/p9K7bEh0Dj6WbXg4JG0xvLQmIadrner1bi45VMJTfnbVHsc7yIajZyoSoK60/dtVBs12Fm6WkUI5/3WAVsNMw=="], - - "word": ["word@0.3.0", "", {}, "sha512-OELeY0Q61OXpdUfTp+oweA/vtLVg5VDOXh+3he3PNzLGG/y0oylSOC1xRVj0+l4vQ3tj/bB1HVHv1ocXkQceFA=="], - "word-wrap": ["word-wrap@1.2.5", "", {}, "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA=="], "wordwrap": ["wordwrap@1.0.0", "", {}, "sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q=="], @@ -6197,8 +6180,6 @@ "xdg-portable": ["xdg-portable@10.6.0", "", { "dependencies": { "os-paths": "^7.4.0" }, "optionalDependencies": { "fsevents": "*" } }, "sha512-xrcqhWDvtZ7WLmt8G4f3hHy37iK7D2idtosRgkeiSPZEPmBShp0VfmRBLWAPC6zLF48APJ21yfea+RfQMF4/Aw=="], - "xlsx": ["xlsx@0.18.5", "", { "dependencies": { 
"adler-32": "~1.3.0", "cfb": "~1.2.1", "codepage": "~1.15.0", "crc-32": "~1.2.1", "ssf": "~0.11.2", "wmf": "~1.0.1", "word": "~0.3.0" }, "bin": { "xlsx": "bin/xlsx.njs" } }, "sha512-dmg3LCjBPHZnQp5/F/+nnTa+miPJxUXB6vtk42YjBBKayDNagxGEeIdWApkYPOf3Z3pm3k62Knjzp7lMeTEtFQ=="], - "xml-name-validator": ["xml-name-validator@5.0.0", "", {}, "sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg=="], "xml2js": ["xml2js@0.6.2", "", { "dependencies": { "sax": ">=0.6.0", "xmlbuilder": "~11.0.0" } }, "sha512-T4rieHaC1EXcES0Kxxj4JWgaUQHDk+qwHcYOCFHfiwKz7tOVPLq7Hjq9dM1WCMhylqMEfP7hMcOIChvotiZegA=="], @@ -6729,8 +6710,6 @@ "@trycompai/email/resend": ["resend@4.8.0", "", { "dependencies": { "@react-email/render": "1.1.2" } }, "sha512-R8eBOFQDO6dzRTDmaMEdpqrkmgSjPpVXt4nGfWsZdYOet0kqra0xgbvTES6HmCriZEXbmGk3e0DiGIaLFTFSHA=="], - "@trycompai/portal/@upstash/ratelimit": ["@upstash/ratelimit@2.0.7", "", { "dependencies": { "@upstash/core-analytics": "^0.0.10" }, "peerDependencies": { "@upstash/redis": "^1.34.3" } }, "sha512-qNQW4uBPKVk8c4wFGj2S/vfKKQxXx1taSJoSGBN36FeiVBBKHQgsjPbKUijZ9Xu5FyVK+pfiXWKIsQGyoje8Fw=="], - "@trycompai/portal/zod": ["zod@3.25.76", "", {}, "sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ=="], "@trycompai/ui/lucide-react": ["lucide-react@0.554.0", "", { "peerDependencies": { "react": "^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0" } }, "sha512-St+z29uthEJVx0Is7ellNkgTEhaeSoA42I7JjOCBCrc5X6LYMGSv0P/2uS5HDLTExP5tpiqRD2PyUEOS6s9UXA=="], @@ -6743,6 +6722,8 @@ "@types/cheerio/cheerio": ["cheerio@1.1.2", "", { "dependencies": { "cheerio-select": "^2.1.0", "dom-serializer": "^2.0.0", "domhandler": "^5.0.3", "domutils": "^3.2.2", "encoding-sniffer": "^0.2.1", "htmlparser2": "^10.0.0", "parse5": "^7.3.0", "parse5-htmlparser2-tree-adapter": "^7.1.0", "parse5-parser-stream": "^7.1.2", "undici": "^7.12.0", "whatwg-mimetype": "^4.0.0" } }, 
"sha512-IkxPpb5rS/d1IiLbHMgfPuS0FgiWTtFIm/Nj+2woXDLTZ7fOT2eqzgYbdMlLweqlHbsZjxEChoVK+7iph7jyQg=="], + "@types/jspdf/jspdf": ["jspdf@3.0.4", "", { "dependencies": { "@babel/runtime": "^7.28.4", "fast-png": "^6.2.0", "fflate": "^0.8.1" }, "optionalDependencies": { "canvg": "^3.0.11", "core-js": "^3.6.0", "dompurify": "^3.2.4", "html2canvas": "^1.0.0-rc.5" } }, "sha512-dc6oQ8y37rRcHn316s4ngz/nOjayLF/FFxBF4V9zamQKRqXxyiH1zagkCdktdWhtoQId5K20xt1lB90XzkB+hQ=="], + "@types/plist/xmlbuilder": ["xmlbuilder@11.0.1", "", {}, "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="], "@types/react-syntax-highlighter/@types/react": ["@types/react@19.2.7", "", { "dependencies": { "csstype": "^3.2.2" } }, "sha512-MWtvHrGZLFttgeEj28VXHxpmwYbor/ATPYbBfSFZEIRK0ecCFLl2Qo55z52Hss+UV9CRN7trSeq1zbgx7YDWWg=="], @@ -7123,6 +7104,8 @@ "jsonwebtoken/ms": ["ms@2.1.3", "", {}, "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="], + "jspdf/@babel/runtime": ["@babel/runtime@7.29.2", "", {}, "sha512-JiDShH45zKHWyGe4ZNVRrCjBz8Nh9TMmZG1kh4QTK8hCBTWBi8Da+i7s1fJw7/lYpM4ccepSNfqzZ/QvABBi5g=="], + "jspdf/fflate": ["fflate@0.8.2", "", {}, "sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="], "jszip/readable-stream": ["readable-stream@2.3.8", "", { "dependencies": { "core-util-is": "~1.0.0", "inherits": "~2.0.3", "isarray": "~1.0.0", "process-nextick-args": "~2.0.0", "safe-buffer": "~5.1.1", "string_decoder": "~1.1.1", "util-deprecate": "~1.0.1" } }, "sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA=="], @@ -8279,6 +8262,8 @@ "@types/cheerio/cheerio/undici": ["undici@7.16.0", "", {}, "sha512-QEg3HPMll0o3t2ourKwOeUAZ159Kn9mx5pnzHRQO8+Wixmh88YdZRiIwat0iNzNNXn0yoEtXJqFpyW7eM8BV7g=="], + "@types/jspdf/jspdf/fflate": ["fflate@0.8.2", "", {}, 
"sha512-cPJU47OaAoCbg0pBvzsgpTPhmhqI5eJjh/JIu8tPj5q+T7iLvW/JAYUqmE7KOB4R1ZyEhzBaIQpQpardBF5z8A=="], + "accepts/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], "ajv-formats/ajv/json-schema-traverse": ["json-schema-traverse@1.0.0", "", {}, "sha512-NM8/P9n3XjXhIZn1lLhkFaACTOURQXjWhV4BA/RnOv8xvgqtqpAX9IO4mRQxSx1Rlo4tqzeqb0sOlruaOy3dug=="], diff --git a/package.json b/package.json index ebc817ca18..2d2595c6a3 100644 --- a/package.json +++ b/package.json @@ -96,7 +96,6 @@ "cheerio": "^1.2.0", "react-syntax-highlighter": "^15.6.6", "unpdf": "^1.4.0", - "xlsx": "^0.18.5", "zod": "^4.3.6" } } diff --git a/packages/db/prisma/migrations/20260316200000_disable_employee_contractor_notifications/migration.sql b/packages/db/prisma/migrations/20260316200000_disable_employee_contractor_notifications/migration.sql new file mode 100644 index 0000000000..f8eb56ad8a --- /dev/null +++ b/packages/db/prisma/migrations/20260316200000_disable_employee_contractor_notifications/migration.sql @@ -0,0 +1,106 @@ +-- Disable non-portal notifications for employee and contractor roles. +-- These roles only access the portal (policies + training), so they should +-- not receive task, mention, digest, or finding notifications. +-- +-- We insert records for ALL built-in roles (not just employee/contractor) +-- so that the union logic in isUserUnsubscribed has complete data when a +-- user holds multiple roles (e.g. employee,admin). + +-- 1. Update existing saved settings for employee/contractor +UPDATE "role_notification_setting" +SET + "taskReminders" = false, + "taskAssignments" = false, + "taskMentions" = false, + "weeklyTaskDigest" = false, + "findingNotifications" = false, + "updatedAt" = now() +WHERE "role" IN ('employee', 'contractor'); + +-- 2. Insert default settings for all built-in roles where no record exists. +-- This ensures the union logic always has complete data for multi-role users. 
+ +-- Owner: all notifications ON +INSERT INTO "role_notification_setting" ( + "id", "organizationId", "role", + "policyNotifications", "taskReminders", "taskAssignments", + "taskMentions", "weeklyTaskDigest", "findingNotifications", + "createdAt", "updatedAt" +) +SELECT + generate_prefixed_cuid('rns'::text), o."id", 'owner', + true, true, true, true, true, true, + now(), now() +FROM "Organization" o +WHERE NOT EXISTS ( + SELECT 1 FROM "role_notification_setting" rns + WHERE rns."organizationId" = o."id" AND rns."role" = 'owner' +); + +-- Admin: all notifications ON +INSERT INTO "role_notification_setting" ( + "id", "organizationId", "role", + "policyNotifications", "taskReminders", "taskAssignments", + "taskMentions", "weeklyTaskDigest", "findingNotifications", + "createdAt", "updatedAt" +) +SELECT + generate_prefixed_cuid('rns'::text), o."id", 'admin', + true, true, true, true, true, true, + now(), now() +FROM "Organization" o +WHERE NOT EXISTS ( + SELECT 1 FROM "role_notification_setting" rns + WHERE rns."organizationId" = o."id" AND rns."role" = 'admin' +); + +-- Auditor: policy + findings only +INSERT INTO "role_notification_setting" ( + "id", "organizationId", "role", + "policyNotifications", "taskReminders", "taskAssignments", + "taskMentions", "weeklyTaskDigest", "findingNotifications", + "createdAt", "updatedAt" +) +SELECT + generate_prefixed_cuid('rns'::text), o."id", 'auditor', + true, false, false, false, false, true, + now(), now() +FROM "Organization" o +WHERE NOT EXISTS ( + SELECT 1 FROM "role_notification_setting" rns + WHERE rns."organizationId" = o."id" AND rns."role" = 'auditor' +); + +-- Employee: policy only +INSERT INTO "role_notification_setting" ( + "id", "organizationId", "role", + "policyNotifications", "taskReminders", "taskAssignments", + "taskMentions", "weeklyTaskDigest", "findingNotifications", + "createdAt", "updatedAt" +) +SELECT + generate_prefixed_cuid('rns'::text), o."id", 'employee', + true, false, false, false, false, false, + 
now(), now() +FROM "Organization" o +WHERE NOT EXISTS ( + SELECT 1 FROM "role_notification_setting" rns + WHERE rns."organizationId" = o."id" AND rns."role" = 'employee' +); + +-- Contractor: policy only +INSERT INTO "role_notification_setting" ( + "id", "organizationId", "role", + "policyNotifications", "taskReminders", "taskAssignments", + "taskMentions", "weeklyTaskDigest", "findingNotifications", + "createdAt", "updatedAt" +) +SELECT + generate_prefixed_cuid('rns'::text), o."id", 'contractor', + true, false, false, false, false, false, + now(), now() +FROM "Organization" o +WHERE NOT EXISTS ( + SELECT 1 FROM "role_notification_setting" rns + WHERE rns."organizationId" = o."id" AND rns."role" = 'contractor' +); diff --git a/packages/email/lib/check-unsubscribe.ts b/packages/email/lib/check-unsubscribe.ts index 171e50af81..74437aaded 100644 --- a/packages/email/lib/check-unsubscribe.ts +++ b/packages/email/lib/check-unsubscribe.ts @@ -30,6 +30,18 @@ const ROLE_SETTING_FIELDS: Partial> = { const ADMIN_ROLES = new Set(['owner', 'admin']); +// Portal-only roles should not receive app notifications by default. +// When no role_notification_setting DB record exists, these defaults apply. +const PORTAL_ONLY_ROLES = new Set(['employee', 'contractor']); +const PORTAL_ONLY_DEFAULTS: RoleNotificationRecord = { + policyNotifications: true, + taskReminders: false, + taskAssignments: false, + taskMentions: false, + weeklyTaskDigest: false, + findingNotifications: false, +}; + interface RoleNotificationRecord { policyNotifications: boolean; taskReminders: boolean; @@ -47,7 +59,8 @@ interface RoleNotificationRecord { * 2. Check role notification settings for the user's roles in the org. * - If ALL roles disable this notification, the user is unsubscribed (no override). * - If ANY role enables it, fall through to personal preferences. - * - If no role settings are configured, fall through to personal preferences. 
+ * - If no role settings are configured, portal-only roles (employee/contractor) + * use built-in defaults; other roles fall through to personal preferences. * 3. Check personal preferences — any user who previously opted out stays opted out. * Owners/admins can toggle freely; non-admin users see these as read-only in the UI * but their existing opt-outs are still honored so we don't re-subscribe people. @@ -182,6 +195,20 @@ export async function isUserUnsubscribed( // Role says ON — fall through to personal preferences. // This ensures users who previously opted out stay opted out, // even if their role matrix now enables the notification. + } else { + // No DB records — use built-in defaults for portal-only roles + const allPortalOnly = userRoles.every((r) => + PORTAL_ONLY_ROLES.has(r), + ); + if (allPortalOnly) { + const enabled = + PORTAL_ONLY_DEFAULTS[ + roleSettingField as keyof RoleNotificationRecord + ]; + if (!enabled) { + return true; + } + } } } }