Monorepo for wisp.place: a static site hosting service built on top of the AT Protocol.

integrate observability package across hosting and firehose services

Authored by Claude Sonnet 4.5 and committed by nekomimi.pet (commits 8f748915, fa567178)

+173 -114
+1
apps/firehose-service/package.json
··· 20 20 "@wispplace/database": "workspace:*", 21 21 "@wispplace/fs-utils": "workspace:*", 22 22 "@wispplace/lexicons": "workspace:*", 23 + "@wispplace/observability": "workspace:*", 23 24 "@wispplace/safe-fetch": "workspace:*", 24 25 "@wispplace/tiered-storage": "workspace:*", 25 26 "hono": "^4.10.4",
+59 -16
apps/firehose-service/src/index.ts
··· 15 15 import { handleSiteCreateOrUpdate, fetchSiteRecord } from './lib/cache-writer'; 16 16 import { startRevalidateWorker, stopRevalidateWorker } from './lib/revalidate-worker'; 17 17 import { closeCacheInvalidationPublisher } from './lib/cache-invalidation'; 18 + import { initializeGrafanaExporters, createLogger, logCollector, errorTracker, metricsCollector } from '@wispplace/observability'; 19 + import { observabilityMiddleware, observabilityErrorHandler } from '@wispplace/observability/middleware/hono'; 20 + 21 + // Initialize Grafana exporters if configured 22 + initializeGrafanaExporters({ 23 + serviceName: 'firehose-service', 24 + serviceVersion: '1.0.0' 25 + }); 26 + 27 + const logger = createLogger('firehose-service'); 18 28 19 29 const app = new Hono(); 20 30 31 + // Add observability middleware 32 + app.use('*', observabilityMiddleware('firehose-service')); 33 + 34 + // Error handler 35 + app.onError(observabilityErrorHandler('firehose-service')); 36 + 21 37 // Health endpoint 22 38 app.get('/health', async (c) => { 23 39 const firehoseHealth = getFirehoseHealth(); ··· 38 54 if (isShuttingDown) return; 39 55 isShuttingDown = true; 40 56 41 - console.log(`\n[Service] Received ${signal}, shutting down...`); 57 + logger.info(`Received ${signal}, shutting down...`); 42 58 43 59 stopFirehose(); 44 60 await stopRevalidateWorker(); 45 61 await closeCacheInvalidationPublisher(); 46 62 await closeDatabase(); 47 63 48 - console.log('[Service] Shutdown complete'); 64 + logger.info('Shutdown complete'); 49 65 process.exit(0); 50 66 } 51 67 ··· 56 72 * Backfill mode - process existing sites from database 57 73 */ 58 74 async function runBackfill(): Promise<void> { 59 - console.log('[Backfill] Starting backfill mode'); 75 + logger.info('Starting backfill mode'); 60 76 const startTime = Date.now(); 61 77 const forceRewriteHtml = process.env.BACKFILL_FORCE_REWRITE_HTML === 'true'; 62 78 63 79 if (forceRewriteHtml) { 64 - console.log('[Backfill] Forcing HTML rewrite 
for all sites'); 80 + logger.info('Forcing HTML rewrite for all sites'); 65 81 } 66 82 67 83 let sites = await listAllSites(); 68 84 if (sites.length === 0) { 69 85 const cachedSites = await listAllSiteCaches(); 70 86 sites = cachedSites.map(site => ({ did: site.did, rkey: site.rkey })); 71 - console.log('[Backfill] Sites table empty; falling back to site_cache entries'); 87 + logger.info('Sites table empty; falling back to site_cache entries'); 72 88 } 73 89 74 - console.log(`[Backfill] Found ${sites.length} sites in database`); 90 + logger.info(`Found ${sites.length} sites in database`); 75 91 76 92 let processed = 0; 77 93 let skipped = 0; ··· 83 99 const result = await fetchSiteRecord(site.did, site.rkey); 84 100 85 101 if (!result) { 86 - console.log(`[Backfill] Site not found on PDS: ${site.did}/${site.rkey}`); 102 + logger.info(`Site not found on PDS: ${site.did}/${site.rkey}`); 87 103 skipped++; 88 104 continue; 89 105 } ··· 91 107 const existingCache = await getSiteCache(site.did, site.rkey); 92 108 // Check if CID matches (already up to date) 93 109 if (!forceRewriteHtml && existingCache && result.cid === existingCache.record_cid) { 94 - console.log(`[Backfill] Site already up to date: ${site.did}/${site.rkey}`); 110 + logger.info(`Site already up to date: ${site.did}/${site.rkey}`); 95 111 skipped++; 96 112 continue; 97 113 } ··· 102 118 }); 103 119 processed++; 104 120 105 - console.log(`[Backfill] Progress: ${processed + skipped + failed}/${sites.length}`); 121 + logger.info(`Progress: ${processed + skipped + failed}/${sites.length}`); 106 122 } catch (err) { 107 - console.error(`[Backfill] Failed to process ${site.did}/${site.rkey}:`, err); 123 + logger.error(`Failed to process ${site.did}/${site.rkey}`, err); 108 124 failed++; 109 125 } 110 126 } ··· 115 131 const elapsedRemSec = elapsedSec % 60; 116 132 const elapsedLabel = elapsedMin > 0 ? 
`${elapsedMin}m ${elapsedRemSec}s` : `${elapsedSec}s`; 117 133 118 - console.log(`[Backfill] Complete: ${processed} processed, ${skipped} skipped, ${failed} failed (${elapsedLabel} elapsed)`); 134 + logger.info(`Complete: ${processed} processed, ${skipped} skipped, ${failed} failed (${elapsedLabel} elapsed)`); 119 135 } 120 136 137 + // Internal observability endpoints (for admin panel) 138 + app.get('/__internal__/observability/logs', (c) => { 139 + const query = c.req.query(); 140 + const filter: any = {}; 141 + if (query.level) filter.level = query.level; 142 + if (query.service) filter.service = query.service; 143 + if (query.search) filter.search = query.search; 144 + if (query.eventType) filter.eventType = query.eventType; 145 + if (query.limit) filter.limit = parseInt(query.limit as string); 146 + return c.json({ logs: logCollector.getLogs(filter) }); 147 + }); 148 + 149 + app.get('/__internal__/observability/errors', (c) => { 150 + const query = c.req.query(); 151 + const filter: any = {}; 152 + if (query.service) filter.service = query.service; 153 + if (query.limit) filter.limit = parseInt(query.limit as string); 154 + return c.json({ errors: errorTracker.getErrors(filter) }); 155 + }); 156 + 157 + app.get('/__internal__/observability/metrics', (c) => { 158 + const query = c.req.query(); 159 + const timeWindow = query.timeWindow ? parseInt(query.timeWindow as string) : 3600000; 160 + const stats = metricsCollector.getStats('firehose-service', timeWindow); 161 + return c.json({ stats, timeWindow }); 162 + }); 163 + 121 164 // Main entry point 122 165 async function main() { 123 - console.log('[Service] Starting firehose-service'); 124 - console.log(`[Service] Mode: ${config.isBackfill ? 'backfill' : 'firehose'}`); 125 - console.log(`[Service] S3 Bucket: ${config.s3Bucket || '(disk fallback)'}`); 166 + logger.info('Starting firehose-service'); 167 + logger.info(`Mode: ${config.isBackfill ? 
'backfill' : 'firehose'}`); 168 + logger.info(`S3 Bucket: ${config.s3Bucket || '(disk fallback)'}`); 126 169 127 170 // Start health server 128 171 const server = serve({ ··· 130 173 port: config.healthPort, 131 174 }); 132 175 133 - console.log(`[Service] Health endpoint: http://localhost:${config.healthPort}/health`); 176 + logger.info(`Health endpoint: http://localhost:${config.healthPort}/health`); 134 177 135 178 if (config.isBackfill) { 136 179 // Run backfill and exit ··· 145 188 } 146 189 147 190 main().catch((err) => { 148 - console.error('[Service] Fatal error:', err); 191 + logger.error('Fatal error', err); 149 192 process.exit(1); 150 193 });
+8 -6
apps/firehose-service/src/lib/cache-invalidation.ts
··· 7 7 */ 8 8 9 9 import Redis from 'ioredis'; 10 + import { createLogger } from '@wispplace/observability'; 10 11 import { config } from '../config'; 11 12 13 + const logger = createLogger('firehose-service'); 12 14 const CHANNEL = 'wisp:cache-invalidate'; 13 15 14 16 let publisher: Redis | null = null; ··· 17 19 function getPublisher(): Redis | null { 18 20 if (!config.redisUrl) { 19 21 if (!loggedMissingRedis) { 20 - console.warn('[CacheInvalidation] REDIS_URL not set; cache invalidation publishing disabled'); 22 + logger.warn('[CacheInvalidation] REDIS_URL not set; cache invalidation publishing disabled'); 21 23 loggedMissingRedis = true; 22 24 } 23 25 return null; 24 26 } 25 27 26 28 if (!publisher) { 27 - console.log(`[CacheInvalidation] Connecting to Redis for publishing: ${config.redisUrl}`); 29 + logger.info(`[CacheInvalidation] Connecting to Redis for publishing: ${config.redisUrl}`); 28 30 publisher = new Redis(config.redisUrl, { 29 31 maxRetriesPerRequest: 2, 30 32 enableReadyCheck: true, 31 33 }); 32 34 33 35 publisher.on('error', (err) => { 34 - console.error('[CacheInvalidation] Redis error:', err); 36 + logger.error('[CacheInvalidation] Redis error', err); 35 37 }); 36 38 37 39 publisher.on('ready', () => { 38 - console.log('[CacheInvalidation] Redis publisher connected'); 40 + logger.info('[CacheInvalidation] Redis publisher connected'); 39 41 }); 40 42 } 41 43 ··· 52 54 53 55 try { 54 56 const message = JSON.stringify({ did, rkey, action }); 55 - console.log(`[CacheInvalidation] Publishing ${action} for ${did}/${rkey} to ${CHANNEL}`); 57 + logger.debug(`[CacheInvalidation] Publishing ${action} for ${did}/${rkey} to ${CHANNEL}`); 56 58 await redis.publish(CHANNEL, message); 57 59 } catch (err) { 58 - console.error('[CacheInvalidation] Failed to publish:', err); 60 + logger.error('[CacheInvalidation] Failed to publish', err); 59 61 } 60 62 } 61 63
+34 -30
apps/firehose-service/src/lib/cache-writer.ts
··· 11 11 import { collectFileCidsFromEntries, countFilesInDirectory, normalizeFileCids } from '@wispplace/fs-utils'; 12 12 import { shouldCompressMimeType } from '@wispplace/atproto-utils/compression'; 13 13 import { MAX_BLOB_SIZE, MAX_FILE_COUNT, MAX_SITE_SIZE } from '@wispplace/constants'; 14 + import { createLogger } from '@wispplace/observability'; 14 15 import { writeFile, deleteFile, listFiles } from './storage'; 15 16 import { getSiteCache, upsertSiteCache, deleteSiteCache, upsertSiteSettingsCache, deleteSiteSettingsCache } from './db'; 16 17 import { rewriteHtmlPaths, isHtmlFile } from './html-rewriter'; 17 18 import { gunzipSync } from 'zlib'; 18 19 import { publishCacheInvalidation } from './cache-invalidation'; 20 + 21 + const logger = createLogger('firehose-service'); 19 22 20 23 /** 21 24 * Fetch a site record from the PDS ··· 24 27 try { 25 28 const pdsEndpoint = await getPdsForDid(did); 26 29 if (!pdsEndpoint) { 27 - console.error('[Cache] Failed to get PDS endpoint for DID', { did, rkey }); 30 + logger.error('Failed to get PDS endpoint for DID', undefined, { did, rkey }); 28 31 return null; 29 32 } 30 33 ··· 38 41 } catch (err) { 39 42 const errorMsg = err instanceof Error ? err.message : String(err); 40 43 if (errorMsg.includes('HTTP 404') || errorMsg.includes('Not Found')) { 41 - console.log('[Cache] Site record not found', { did, rkey }); 44 + logger.info('Site record not found', { did, rkey }); 42 45 } else { 43 - console.error('[Cache] Failed to fetch site record', { did, rkey, error: errorMsg }); 46 + logger.error('Failed to fetch site record', err, { did, rkey }); 44 47 } 45 48 return null; 46 49 } ··· 57 60 try { 58 61 const endpoint = pdsEndpoint ?? 
await getPdsForDid(did); 59 62 if (!endpoint) { 60 - console.error('[Cache] Failed to get PDS endpoint for DID (settings)', { did, rkey }); 63 + logger.error('Failed to get PDS endpoint for DID (settings)', undefined, { did, rkey }); 61 64 return null; 62 65 } 63 66 ··· 71 74 } catch (err) { 72 75 const errorMsg = err instanceof Error ? err.message : String(err); 73 76 if (errorMsg.includes('HTTP 404') || errorMsg.includes('Not Found')) { 74 - console.log('[Cache] Settings record not found', { did, rkey }); 77 + logger.info('Settings record not found', { did, rkey }); 75 78 } else { 76 - console.error('[Cache] Failed to fetch settings record', { did, rkey, error: errorMsg }); 79 + logger.error('Failed to fetch settings record', err, { did, rkey }); 77 80 } 78 81 return null; 79 82 } ··· 137 140 const MAX_DEPTH = 10; 138 141 139 142 if (depth >= MAX_DEPTH) { 140 - console.error('[Cache] Max subfs expansion depth reached'); 143 + logger.error('Max subfs expansion depth reached'); 141 144 return directory; 142 145 } 143 146 ··· 147 150 // Fetch uncached subfs records 148 151 const uncachedUris = subfsUris.filter(({ uri }) => !subfsCache.has(uri)); 149 152 if (uncachedUris.length > 0) { 150 - console.log(`[Cache] Fetching ${uncachedUris.length} subfs records (depth ${depth})`); 153 + logger.info(`Fetching ${uncachedUris.length} subfs records`, { depth }); 151 154 const fetchedRecords = await Promise.all( 152 155 uncachedUris.map(async ({ uri }) => { 153 156 const record = await fetchSubfsRecord(uri, pdsEndpoint); ··· 342 345 ): Promise<void> { 343 346 const blobUrl = `${pdsEndpoint}/xrpc/com.atproto.sync.getBlob?did=${encodeURIComponent(did)}&cid=${encodeURIComponent(file.cid)}`; 344 347 345 - console.log(`[Cache] Downloading ${file.path}`); 348 + logger.debug(`Downloading ${file.path}`); 346 349 347 350 let content = await safeFetchBlob(blobUrl, { maxSize: MAX_BLOB_SIZE, timeout: 300000 }); 348 351 let encoding = file.encoding; ··· 356 359 // Heuristic fallback: some 
records omit base64 flag but content is base64 text 357 360 const decoded = tryDecodeBase64(content); 358 361 if (decoded) { 359 - console.warn(`[Cache] Decoded base64 fallback for ${file.path} (base64 flag missing)`); 362 + logger.warn(`Decoded base64 fallback for ${file.path} (base64 flag missing)`); 360 363 content = decoded; 361 364 } 362 365 } ··· 370 373 content = gunzipSync(content); 371 374 encoding = undefined; 372 375 } catch (error) { 373 - console.error(`[Cache] Failed to decompress ${file.path}, storing gzipped`); 376 + logger.error(`Failed to decompress ${file.path}, storing gzipped`, error); 374 377 } 375 378 } else if (encoding === 'gzip' && content.length >= 2 && 376 379 !(content[0] === 0x1f && content[1] === 0x8b)) { 377 380 // If marked gzip but doesn't look gzipped, attempt base64 decode and retry 378 381 const decoded = tryDecodeBase64(content); 379 382 if (decoded && decoded.length >= 2 && decoded[0] === 0x1f && decoded[1] === 0x8b) { 380 - console.warn(`[Cache] Decoded base64+gzip fallback for ${file.path}`); 383 + logger.warn(`Decoded base64+gzip fallback for ${file.path}`); 381 384 try { 382 385 content = gunzipSync(decoded); 383 386 encoding = undefined; 384 387 } catch (error) { 385 - console.error(`[Cache] Failed to decompress base64+gzip fallback for ${file.path}, storing gzipped`); 388 + logger.error(`Failed to decompress base64+gzip fallback for ${file.path}, storing gzipped`, error); 386 389 content = decoded; 387 390 } 388 391 } ··· 413 416 try { 414 417 rewriteSource = gunzipSync(content); 415 418 } catch (error) { 416 - console.error(`[Cache] Failed to decompress ${file.path} for rewrite, using raw content`); 419 + logger.error(`Failed to decompress ${file.path} for rewrite, using raw content`, error); 417 420 } 418 421 } 419 422 ··· 423 426 424 427 const rewrittenKey = `${did}/${rkey}/.rewritten/${file.path}`; 425 428 await writeFile(rewrittenKey, rewrittenContent, { mimeType: 'text/html' }); 426 - console.log(`[Cache] Wrote 
rewritten HTML: ${rewrittenKey}`); 429 + logger.debug(`Wrote rewritten HTML: ${rewrittenKey}`); 427 430 } 428 431 429 - console.log(`[Cache] Stored ${file.path} (${content.length} bytes)`); 432 + logger.debug(`Stored ${file.path} (${content.length} bytes)`); 430 433 } 431 434 432 435 /** ··· 443 446 } 444 447 ): Promise<void> { 445 448 const forceRewriteHtml = options?.forceRewriteHtml === true; 446 - console.log(`[Cache] Processing site ${did}/${rkey}, record CID: ${recordCid}`, { 449 + logger.info(`Processing site ${did}/${rkey}`, { 450 + recordCid, 447 451 forceRewriteHtml, 448 452 }); 449 453 450 454 if (!record.root?.entries) { 451 - console.error('[Cache] Invalid record structure'); 455 + logger.error('Invalid record structure'); 452 456 return; 453 457 } 454 458 455 459 // Get PDS endpoint 456 460 const pdsEndpoint = await getPdsForDid(did); 457 461 if (!pdsEndpoint) { 458 - console.error('[Cache] Could not resolve PDS for', did); 462 + logger.error('Could not resolve PDS', undefined, { did }); 459 463 return; 460 464 } 461 465 ··· 465 469 // Validate limits 466 470 const fileCount = countFilesInDirectory(expandedRoot); 467 471 if (fileCount > MAX_FILE_COUNT) { 468 - console.error(`[Cache] Site exceeds file limit: ${fileCount} > ${MAX_FILE_COUNT}`); 472 + logger.error(`Site exceeds file limit: ${fileCount} > ${MAX_FILE_COUNT}`); 469 473 return; 470 474 } 471 475 472 476 const totalSize = calculateTotalBlobSize(expandedRoot); 473 477 if (totalSize > MAX_SITE_SIZE) { 474 - console.error(`[Cache] Site exceeds size limit: ${totalSize} > ${MAX_SITE_SIZE}`); 478 + logger.error(`Site exceeds size limit: ${totalSize} > ${MAX_SITE_SIZE}`); 475 479 return; 476 480 } 477 481 ··· 485 489 const normalizedFileCids = normalizeFileCids(rawFileCids); 486 490 const oldFileCids = normalizedFileCids.value; 487 491 if (normalizedFileCids.source === 'string-invalid' || normalizedFileCids.source === 'other') { 488 - console.warn('[Cache] Existing file_cids had unexpected shape; 
treating as empty', { 492 + logger.warn('Existing file_cids had unexpected shape; treating as empty', { 489 493 did, 490 494 rkey, 491 495 type: Array.isArray(rawFileCids) ? 'array' : typeof rawFileCids, ··· 512 516 } 513 517 } 514 518 515 - console.log(`[Cache] Files unchanged: ${newFiles.length - filesToDownload.length}, to download: ${filesToDownload.length}, to delete: ${pathsToDelete.length}`); 519 + logger.info(`Files unchanged: ${newFiles.length - filesToDownload.length}, to download: ${filesToDownload.length}, to delete: ${pathsToDelete.length}`); 516 520 517 521 // Download new/changed files (with concurrency limit) 518 522 const DOWNLOAD_CONCURRENCY = 20; ··· 541 545 } 542 546 543 547 // Update DB with new CIDs 544 - console.log(`[Cache] About to upsert site cache for ${did}/${rkey}`); 548 + logger.debug(`About to upsert site cache for ${did}/${rkey}`); 545 549 await upsertSiteCache(did, rkey, recordCid, newFileCids); 546 - console.log(`[Cache] Updated site cache for ${did}/${rkey} with record CID ${recordCid}`); 550 + logger.debug(`Updated site cache for ${did}/${rkey} with record CID ${recordCid}`); 547 551 548 552 // Backfill settings if a record exists for this rkey 549 553 const settingsRecord = await fetchSettingsRecord(did, rkey, pdsEndpoint); ··· 557 561 await publishCacheInvalidation(did, rkey, 'update'); 558 562 } 559 563 560 - console.log(`[Cache] Successfully cached site ${did}/${rkey}`); 564 + logger.info(`Successfully cached site ${did}/${rkey}`); 561 565 } 562 566 563 567 /** 564 568 * Handle a site delete event 565 569 */ 566 570 export async function handleSiteDelete(did: string, rkey: string): Promise<void> { 567 - console.log(`[Cache] Deleting site ${did}/${rkey}`); 571 + logger.info(`Deleting site ${did}/${rkey}`); 568 572 569 573 // List all files for this site and delete them 570 574 const prefix = `${did}/${rkey}/`; ··· 580 584 // Notify hosting-service to invalidate its local caches 581 585 await publishCacheInvalidation(did, rkey, 
'delete'); 582 586 583 - console.log(`[Cache] Deleted site ${did}/${rkey} (${keys.length} files)`); 587 + logger.info(`Deleted site ${did}/${rkey} (${keys.length} files)`); 584 588 } 585 589 586 590 /** 587 591 * Handle settings create/update event 588 592 */ 589 593 export async function handleSettingsUpdate(did: string, rkey: string, settings: WispSettings, recordCid: string): Promise<void> { 590 - console.log(`[Cache] Updating settings for ${did}/${rkey}`); 594 + logger.info(`Updating settings for ${did}/${rkey}`); 591 595 592 596 await upsertSiteSettingsCache(did, rkey, recordCid, { 593 597 directoryListing: settings.directoryListing, ··· 606 610 * Handle settings delete event 607 611 */ 608 612 export async function handleSettingsDelete(did: string, rkey: string): Promise<void> { 609 - console.log(`[Cache] Deleting settings for ${did}/${rkey}`); 613 + logger.info(`Deleting settings for ${did}/${rkey}`); 610 614 await deleteSiteSettingsCache(did, rkey); 611 615 612 616 // Notify hosting-service to invalidate its local caches
+12 -11
apps/firehose-service/src/lib/db.ts
··· 1 1 import postgres from 'postgres'; 2 2 import type { SiteCache, SiteRecord, SiteSettingsCache } from '@wispplace/database'; 3 + import { createLogger } from '@wispplace/observability'; 3 4 import { config } from '../config'; 5 + 6 + const logger = createLogger('firehose-service'); 4 7 5 8 const sql = postgres(config.databaseUrl, { 6 9 max: 10, ··· 54 57 recordCid: string, 55 58 fileCids: Record<string, string> 56 59 ): Promise<void> { 57 - console.log(`[DB] upsertSiteCache starting for ${did}/${rkey}`); 60 + logger.debug(`[DB] upsertSiteCache starting for ${did}/${rkey}`); 58 61 try { 59 62 await sql` 60 63 INSERT INTO site_cache (did, rkey, record_cid, file_cids, cached_at, updated_at) ··· 65 68 file_cids = EXCLUDED.file_cids, 66 69 updated_at = EXTRACT(EPOCH FROM NOW()) 67 70 `; 68 - console.log(`[DB] upsertSiteCache completed for ${did}/${rkey}`); 71 + logger.debug(`[DB] upsertSiteCache completed for ${did}/${rkey}`); 69 72 } catch (err) { 70 - const error = err instanceof Error ? err : new Error(String(err)); 71 - console.error('[DB] upsertSiteCache error:', { did, rkey, error: error.message, stack: error.stack }); 72 - throw error; 73 + logger.error('[DB] upsertSiteCache error', err, { did, rkey }); 74 + throw err; 73 75 } 74 76 } 75 77 ··· 98 100 const indexFiles = settings.indexFiles ?? []; 99 101 const headers = settings.headers ?? []; 100 102 101 - console.log(`[DB] upsertSiteSettingsCache starting for ${did}/${rkey}`, { 103 + logger.debug(`[DB] upsertSiteSettingsCache starting for ${did}/${rkey}`, { 102 104 directoryListing, 103 105 spaMode, 104 106 custom404, ··· 134 136 headers = EXCLUDED.headers, 135 137 updated_at = EXTRACT(EPOCH FROM NOW()) 136 138 `; 137 - console.log(`[DB] upsertSiteSettingsCache completed for ${did}/${rkey}`); 139 + logger.debug(`[DB] upsertSiteSettingsCache completed for ${did}/${rkey}`); 138 140 } catch (err) { 139 - const error = err instanceof Error ? 
err : new Error(String(err)); 140 - console.error('[DB] upsertSiteSettingsCache error:', { did, rkey, error: error.message, stack: error.stack }); 141 - throw error; 141 + logger.error('[DB] upsertSiteSettingsCache error', err, { did, rkey }); 142 + throw err; 142 143 } 143 144 } 144 145 ··· 148 149 149 150 export async function closeDatabase(): Promise<void> { 150 151 await sql.end({ timeout: 5 }); 151 - console.log('[DB] Database connections closed'); 152 + logger.info('[DB] Database connections closed'); 152 153 } 153 154 154 155 export { sql };
+19 -23
apps/firehose-service/src/lib/firehose.ts
··· 8 8 import { isBun, BunFirehose, type Event, type CommitEvt } from '@wispplace/bun-firehose'; 9 9 import type { Record as WispFsRecord } from '@wispplace/lexicons/types/place/wisp/fs'; 10 10 import type { Record as WispSettings } from '@wispplace/lexicons/types/place/wisp/settings'; 11 + import { createLogger } from '@wispplace/observability'; 11 12 import { config } from '../config'; 12 13 import { 13 14 handleSiteCreateOrUpdate, ··· 18 19 } from './cache-writer'; 19 20 20 21 const idResolver = new IdResolver(); 22 + const logger = createLogger('firehose-service'); 21 23 22 24 // Track firehose health 23 25 let lastEventTime = Date.now(); ··· 70 72 const commitEvt = evt as CommitEvt; 71 73 const { did, collection, rkey, record, cid } = commitEvt; 72 74 73 - console.log(`[Firehose] Debug: Event ${evt.event} for ${collection}:${did}/${rkey}, CID: ${cid?.toString() || 'unknown'}`); 75 + logger.debug(`Event ${evt.event} for ${collection}:${did}/${rkey}`, { cid: cid?.toString() || 'unknown' }); 74 76 75 77 // Handle place.wisp.fs events 76 78 if (collection === 'place.wisp.fs') { 77 - console.log(`[Firehose] Received ${commitEvt.event} event for ${did}/${rkey}, CID: ${cid?.toString() || 'unknown'}`); 79 + logger.info(`[place.wisp.fs] Received ${commitEvt.event} event`, { did, rkey, cid: cid?.toString() || 'unknown' }); 78 80 processWithConcurrencyLimit(async () => { 79 81 try { 80 - console.log(`[Firehose] Inside handler for ${commitEvt.event} event for ${did}/${rkey}`); 82 + logger.debug(`[place.wisp.fs] Processing ${commitEvt.event} event`, { did, rkey }); 81 83 if (commitEvt.event === 'delete') { 82 84 await handleSiteDelete(did, rkey); 83 85 } else { ··· 87 89 if (verified) { 88 90 await handleSiteCreateOrUpdate(did, rkey, verified.record, verified.cid); 89 91 } else { 90 - console.log(`[Firehose] Skipping ${commitEvt.event} event for ${did}/${rkey} - verification failed`); 92 + logger.warn(`[place.wisp.fs] Skipping ${commitEvt.event} event - verification 
failed`, { did, rkey }); 91 93 } 92 94 } 93 - console.log(`[Firehose] Completed handler for ${commitEvt.event} event for ${did}/${rkey}`); 95 + logger.debug(`[place.wisp.fs] Completed ${commitEvt.event} event`, { did, rkey }); 94 96 } catch (err) { 95 - const error = err instanceof Error ? err : new Error(String(err)); 96 - console.error('[Firehose] Error handling place.wisp.fs event:', { did, rkey, event: commitEvt.event, error: error.message, stack: error.stack }); 97 + logger.error(`[place.wisp.fs] Error handling event`, err, { did, rkey, event: commitEvt.event }); 97 98 } 98 99 }).catch(err => { 99 - const error = err instanceof Error ? err : new Error(String(err)); 100 - console.error('[Firehose] Error processing place.wisp.fs event:', { did, rkey, event: commitEvt.event, error: error.message, stack: error.stack }); 100 + logger.error(`[place.wisp.fs] Error processing event`, err, { did, rkey, event: commitEvt.event }); 101 101 }); 102 102 } 103 103 ··· 112 112 await handleSettingsUpdate(did, rkey, record as WispSettings, cidStr); 113 113 } 114 114 } catch (err) { 115 - const error = err instanceof Error ? err : new Error(String(err)); 116 - console.error('[Firehose] Error handling place.wisp.settings event:', { did, rkey, event: commitEvt.event, error: error.message, stack: error.stack }); 115 + logger.error(`[place.wisp.settings] Error handling event`, err, { did, rkey, event: commitEvt.event }); 117 116 } 118 117 }).catch(err => { 119 - const error = err instanceof Error ? err : new Error(String(err)); 120 - console.error('[Firehose] Error processing place.wisp.settings event:', { did, rkey, event: commitEvt.event, error: error.message, stack: error.stack }); 118 + logger.error(`[place.wisp.settings] Error processing event`, err, { did, rkey, event: commitEvt.event }); 121 119 }); 122 120 } 123 121 } catch (err) { 124 - const error = err instanceof Error ? 
err : new Error(String(err)); 125 - console.error('[Firehose] Unexpected error in handleEvent:', { error: error.message, stack: error.stack }); 122 + logger.error('Unexpected error in handleEvent', err); 126 123 } 127 124 } 128 125 129 126 function handleError(err: Error): void { 130 - console.error('[Firehose] Error:', err); 131 - console.error('[Firehose] Stack:', err.stack); 127 + logger.error('Firehose connection error', err); 132 128 } 133 129 134 130 let firehoseHandle: { destroy: () => void } | null = null; ··· 137 133 * Start the firehose worker 138 134 */ 139 135 export function startFirehose(): void { 140 - console.log(`[Firehose] Starting (runtime: ${isBun ? 'Bun' : 'Node.js'})`); 141 - console.log(`[Firehose] Service: ${config.firehoseService}`); 142 - console.log(`[Firehose] Max concurrency: ${config.firehoseMaxConcurrency}`); 136 + logger.info(`Starting firehose (runtime: ${isBun ? 'Bun' : 'Node.js'})`); 137 + logger.info(`Service: ${config.firehoseService}`); 138 + logger.info(`Max concurrency: ${config.firehoseMaxConcurrency}`); 143 139 144 140 isConnected = true; 145 141 ··· 169 165 170 166 // Log cache info hourly 171 167 setInterval(() => { 172 - console.log('[Firehose] Hourly status check'); 168 + logger.info('Hourly status check'); 173 169 }, 60 * 60 * 1000); 174 170 175 171 // Log status periodically 176 172 setInterval(() => { 177 173 const health = getFirehoseHealth(); 178 174 if (health.timeSinceLastEvent > 30000) { 179 - console.log(`[Firehose] No events for ${Math.round(health.timeSinceLastEvent / 1000)}s`); 175 + logger.warn(`No events for ${Math.round(health.timeSinceLastEvent / 1000)}s`); 180 176 } 181 177 }, 30000); 182 178 } ··· 185 181 * Stop the firehose worker 186 182 */ 187 183 export function stopFirehose(): void { 188 - console.log('[Firehose] Stopping'); 184 + logger.info('Stopping firehose'); 189 185 isConnected = false; 190 186 firehoseHandle?.destroy(); 191 187 firehoseHandle = null;
+12 -12
apps/firehose-service/src/lib/revalidate-worker.ts
··· 1 1 import Redis from 'ioredis'; 2 2 import os from 'os'; 3 + import { createLogger } from '@wispplace/observability'; 3 4 import { config } from '../config'; 4 5 import { fetchSiteRecord, handleSiteCreateOrUpdate } from './cache-writer'; 5 6 7 + const logger = createLogger('firehose-service'); 6 8 const consumerName = process.env.WISP_REVALIDATE_CONSUMER || `${os.hostname()}:${process.pid}`; 7 9 const batchSize = Number.parseInt(process.env.WISP_REVALIDATE_BATCH_SIZE || '10', 10); 8 10 const claimIdleMs = Number.parseInt(process.env.WISP_REVALIDATE_CLAIM_IDLE_MS || '60000', 10); ··· 33 35 const reason = fields.reason || 'storage-miss'; 34 36 35 37 if (!did || !rkey) { 36 - console.warn('[Revalidate] Missing did/rkey in message', { id, fields }); 38 + logger.warn('[Revalidate] Missing did/rkey in message', { id, fields }); 37 39 await redis.xack(config.revalidateStream, config.revalidateGroup, id); 38 40 return; 39 41 } 40 42 41 - console.log(`[Revalidate] Received message ${id}: ${did}/${rkey} (${reason})`); 43 + logger.info(`[Revalidate] Received message ${id}: ${did}/${rkey} (${reason})`); 42 44 43 45 const record = await fetchSiteRecord(did, rkey); 44 46 if (!record) { 45 - console.warn(`[Revalidate] Site record not found on PDS: ${did}/${rkey}`); 47 + logger.warn(`[Revalidate] Site record not found on PDS: ${did}/${rkey}`); 46 48 await redis.xack(config.revalidateStream, config.revalidateGroup, id); 47 49 return; 48 50 } ··· 51 53 skipInvalidation: true, 52 54 }); 53 55 54 - console.log(`[Revalidate] Completed ${id}: ${did}/${rkey}`); 56 + logger.info(`[Revalidate] Completed ${id}: ${did}/${rkey}`); 55 57 await redis.xack(config.revalidateStream, config.revalidateGroup, id); 56 58 } 57 59 ··· 60 62 try { 61 63 await processMessage(id, rawFields); 62 64 } catch (err) { 63 - const error = err instanceof Error ? 
err : new Error(String(err)); 64 - console.error('[Revalidate] Failed to process message', { id, error: error.message, stack: error.stack }); 65 + logger.error('[Revalidate] Failed to process message', err, { id }); 65 66 } 66 67 } 67 68 } ··· 143 144 await claimStaleMessages(); 144 145 await readNewMessages(); 145 146 } catch (err) { 146 - const error = err instanceof Error ? err : new Error(String(err)); 147 - console.error('[Revalidate] Loop error', { error: error.message, stack: error.stack }); 147 + logger.error('[Revalidate] Loop error', err); 148 148 await new Promise((resolve) => setTimeout(resolve, 1000)); 149 149 } 150 150 } ··· 152 152 153 153 export async function startRevalidateWorker(): Promise<void> { 154 154 if (!config.redisUrl) { 155 - console.warn('[Revalidate] REDIS_URL not set; revalidate worker disabled'); 155 + logger.warn('[Revalidate] REDIS_URL not set; revalidate worker disabled'); 156 156 return; 157 157 } 158 158 159 159 if (running) return; 160 160 161 - console.log(`[Revalidate] Connecting to Redis: ${config.redisUrl}`); 161 + logger.info(`[Revalidate] Connecting to Redis: ${config.redisUrl}`); 162 162 redis = new Redis(config.redisUrl, { 163 163 maxRetriesPerRequest: 2, 164 164 enableReadyCheck: true, 165 165 }); 166 166 167 167 redis.on('error', (err) => { 168 - console.error('[Revalidate] Redis error:', err); 168 + logger.error('[Revalidate] Redis error', err); 169 169 }); 170 170 171 171 redis.on('ready', () => { 172 - console.log(`[Revalidate] Redis connected, stream: ${config.revalidateStream}, group: ${config.revalidateGroup}`); 172 + logger.info(`[Revalidate] Redis connected, stream: ${config.revalidateStream}, group: ${config.revalidateGroup}`); 173 173 }); 174 174 175 175 running = true;
+5 -2
apps/firehose-service/src/lib/storage.ts
··· 8 8 S3StorageTier, 9 9 DiskStorageTier, 10 10 } from '@wispplace/tiered-storage'; 11 + import { createLogger } from '@wispplace/observability'; 11 12 import { config } from '../config'; 13 + 14 + const logger = createLogger('firehose-service'); 12 15 13 16 // Create S3 tier (or fallback to disk for local dev) 14 17 let coldTier: S3StorageTier | DiskStorageTier; ··· 28 31 forcePathStyle: config.s3ForcePathStyle, 29 32 metadataBucket: config.s3MetadataBucket, 30 33 }); 31 - console.log('[Storage] Using S3 cold tier:', config.s3Bucket); 34 + logger.info('[Storage] Using S3 cold tier:', { bucket: config.s3Bucket }); 32 35 } else { 33 36 // Fallback to disk for local development 34 37 const cacheDir = process.env.CACHE_DIR || './cache/sites'; ··· 38 41 evictionPolicy: 'lru', 39 42 encodeColons: false, 40 43 }); 41 - console.log('[Storage] Using disk fallback (no S3_BUCKET configured):', cacheDir); 44 + logger.info('[Storage] Using disk fallback (no S3_BUCKET configured):', { cacheDir }); 42 45 } 43 46 44 47 // Identity serializers for raw binary data (no JSON transformation)
+9 -7
apps/hosting-service/src/index.ts
··· 1 1 import app from './server'; 2 2 import { serve } from '@hono/node-server'; 3 - import { initializeGrafanaExporters } from '@wispplace/observability'; 3 + import { initializeGrafanaExporters, createLogger } from '@wispplace/observability'; 4 4 import { mkdirSync, existsSync } from 'fs'; 5 5 import { startDomainCacheCleanup, stopDomainCacheCleanup, closeDatabase } from './lib/db'; 6 6 import { closeRevalidateQueue } from './lib/revalidate-queue'; 7 7 import { startCacheInvalidationSubscriber, stopCacheInvalidationSubscriber } from './lib/cache-invalidation'; 8 8 import { storage, getStorageConfig } from './lib/storage'; 9 + 10 + const logger = createLogger('hosting-service'); 9 11 10 12 // Initialize Grafana exporters if configured 11 13 initializeGrafanaExporters({ ··· 19 21 // Ensure cache directory exists 20 22 if (!existsSync(CACHE_DIR)) { 21 23 mkdirSync(CACHE_DIR, { recursive: true }); 22 - console.log('Created cache directory:', CACHE_DIR); 24 + logger.info('Created cache directory', { CACHE_DIR }); 23 25 } 24 26 25 27 // Start domain cache cleanup ··· 33 35 const BOOTSTRAP_HOT_LIMIT = process.env.BOOTSTRAP_HOT_LIMIT ? 
parseInt(process.env.BOOTSTRAP_HOT_LIMIT) : 100; 34 36 35 37 if (BOOTSTRAP_HOT_ON_STARTUP) { 36 - console.log(`🔥 Bootstrapping hot cache (top ${BOOTSTRAP_HOT_LIMIT} items)...`); 38 + logger.info(`Bootstrapping hot cache (top ${BOOTSTRAP_HOT_LIMIT} items)...`); 37 39 storage.bootstrapHot(BOOTSTRAP_HOT_LIMIT) 38 40 .then((loaded: number) => { 39 - console.log(`✅ Bootstrapped ${loaded} items into hot cache`); 41 + logger.info(`Bootstrapped ${loaded} items into hot cache`); 40 42 }) 41 43 .catch((err: unknown) => { 42 - console.error('❌ Hot cache bootstrap error:', err); 44 + logger.error('Hot cache bootstrap error', err); 43 45 }); 44 46 } 45 47 ··· 82 84 83 85 // Graceful shutdown 84 86 process.on('SIGINT', async () => { 85 - console.log('\n🛑 Shutting down...'); 87 + logger.info('Shutting down...'); 86 88 stopDomainCacheCleanup(); 87 89 await stopCacheInvalidationSubscriber(); 88 90 await closeRevalidateQueue(); ··· 92 94 }); 93 95 94 96 process.on('SIGTERM', async () => { 95 - console.log('\n🛑 Shutting down...'); 97 + logger.info('Shutting down...'); 96 98 stopDomainCacheCleanup(); 97 99 await stopCacheInvalidationSubscriber(); 98 100 await closeRevalidateQueue();
+6 -4
apps/hosting-service/src/server.ts
··· 7 7 import { cors } from 'hono/cors'; 8 8 import { getWispDomain, getCustomDomain, getCustomDomainByHash } from './lib/db'; 9 9 import { resolveDid } from './lib/utils'; 10 - import { logCollector, errorTracker, metricsCollector } from '@wispplace/observability'; 10 + import { createLogger, logCollector, errorTracker, metricsCollector } from '@wispplace/observability'; 11 11 import { observabilityMiddleware, observabilityErrorHandler } from '@wispplace/observability/middleware/hono'; 12 12 import { sanitizePath } from '@wispplace/fs-utils'; 13 13 import { isValidRkey, extractHeaders } from './lib/request-utils'; 14 14 import { serveFromCache, serveFromCacheWithRewrite } from './lib/file-serving'; 15 15 import { getRevalidateMetrics } from './lib/revalidate-metrics'; 16 + 17 + const logger = createLogger('hosting-service'); 16 18 17 19 const BASE_HOST_ENV = process.env.BASE_HOST || 'wisp.place'; 18 20 const BASE_HOST = BASE_HOST_ENV.split(':')[0] || BASE_HOST_ENV; ··· 43 45 const rawPath = url.pathname.replace(/^\//, ''); 44 46 const path = sanitizePath(rawPath); 45 47 46 - console.log(`[Server] Request: host=${hostname} hostnameWithoutPort=${hostnameWithoutPort} path=${path} BASE_HOST=${BASE_HOST}`); 48 + logger.debug(`Request: host=${hostname} hostnameWithoutPort=${hostnameWithoutPort} path=${path}`, { BASE_HOST }); 47 49 48 50 // Check if this is sites.wisp.place subdomain (strip port for comparison) 49 51 if (hostnameWithoutPort === `sites.${BASE_HOST}`) { ··· 86 88 return c.redirect(`${url.pathname}/${url.search}`, 301); 87 89 } 88 90 89 - console.log(`[Server] sites.wisp.place request: identifier=${identifier}, site=${site}, filePath=${filePath}`); 91 + logger.debug(`sites.wisp.place request: identifier=${identifier}, site=${site}, filePath=${filePath}`); 90 92 91 93 // Serve with HTML path rewriting to handle absolute paths 92 94 const basePath = `/${identifier}/${site}/`; 93 - console.log(`[Server] Serving with basePath: ${basePath}`); 95 + 
logger.debug(`Serving with basePath: ${basePath}`); 94 96 const headers = extractHeaders(c.req.raw.headers); 95 97 return serveFromCacheWithRewrite(did, site, filePath, basePath, c.req.url, headers); 96 98 }
+8 -3
bun.lock
··· 28 28 "@wispplace/database": "workspace:*", 29 29 "@wispplace/fs-utils": "workspace:*", 30 30 "@wispplace/lexicons": "workspace:*", 31 + "@wispplace/observability": "workspace:*", 31 32 "@wispplace/safe-fetch": "workspace:*", 32 33 "@wispplace/tiered-storage": "workspace:*", 33 34 "hono": "^4.10.4", ··· 135 136 }, 136 137 "cli": { 137 138 "name": "wispctl", 138 - "version": "1.0.6", 139 + "version": "1.0.8", 139 140 "bin": { 140 141 "wispctl": "dist/index.js", 141 142 }, ··· 1920 1921 1921 1922 "@typescript-eslint/typescript-estree/minimatch": ["minimatch@9.0.5", "", { "dependencies": { "brace-expansion": "^2.0.1" } }, "sha512-G6T0ZX48xgozx7587koeX9Ys2NYy6Gmv//P89sEte9V9whIapMNF4idKxnW2QtCcLiTWlb/wfCabAtAFWhhBow=="], 1922 1923 1923 - "@wispplace/bun-firehose/@types/bun": ["@types/bun@1.3.8", "", { "dependencies": { "bun-types": "1.3.8" } }, "sha512-3LvWJ2q5GerAXYxO2mffLTqOzEu5qnhEAlh48Vnu8WQfnmSwbgagjGZV6BoHKJztENYEDn6QmVd949W4uESRJA=="], 1924 + "@wispplace/bun-firehose/@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="], 1924 1925 1925 1926 "@wispplace/lexicons/@atproto/lexicon": ["@atproto/lexicon@0.5.2", "", { "dependencies": { "@atproto/common-web": "^0.4.4", "@atproto/syntax": "^0.4.1", "iso-datestring-validator": "^2.2.2", "multiformats": "^9.9.0", "zod": "^3.23.8" } }, "sha512-lRmJgMA8f5j7VB5Iu5cp188ald5FuI4FlmZ7nn6EBrk1dgOstWVrI5Ft6K3z2vjyLZRG6nzknlsw+tDP63p7bQ=="], 1926 1927 1927 1928 "@wispplace/main-app/@atproto/api": ["@atproto/api@0.17.7", "", { "dependencies": { "@atproto/common-web": "^0.4.3", "@atproto/lexicon": "^0.5.1", "@atproto/syntax": "^0.4.1", "@atproto/xrpc": "^0.7.5", "await-lock": "^2.2.2", "multiformats": "^9.9.0", "tlds": "^1.234.0", "zod": "^3.23.8" } }, "sha512-V+OJBZq9chcrD21xk1bUa6oc5DSKfQj5DmUPf5rmZncqL1w9ZEbS38H5cMyqqdhfgo2LWeDRdZHD0rvNyJsIaw=="], 1928 1929 1929 - 
"@wispplace/tiered-storage/@types/bun": ["@types/bun@1.3.8", "", { "dependencies": { "bun-types": "1.3.8" } }, "sha512-3LvWJ2q5GerAXYxO2mffLTqOzEu5qnhEAlh48Vnu8WQfnmSwbgagjGZV6BoHKJztENYEDn6QmVd949W4uESRJA=="], 1930 + "@wispplace/tiered-storage/@types/bun": ["@types/bun@1.3.9", "", { "dependencies": { "bun-types": "1.3.9" } }, "sha512-KQ571yULOdWJiMH+RIWIOZ7B2RXQGpL1YQrBtLIV3FqDcCu6FsbFUBwhdKUlCKUpS3PJDsHlJ1QKlpxoVR+xtw=="], 1930 1931 1931 1932 "accepts/mime-types": ["mime-types@2.1.35", "", { "dependencies": { "mime-db": "1.52.0" } }, "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw=="], 1932 1933 ··· 2058 2059 2059 2060 "@typescript-eslint/typescript-estree/minimatch/brace-expansion": ["brace-expansion@2.0.2", "", { "dependencies": { "balanced-match": "^1.0.0" } }, "sha512-Jt0vHyM+jmUBqojB7E1NIYadt0vI0Qxjxd2TErW94wDz+E2LAm5vKMXXwg6ZZBTHPuUlDgQHKXvjGBdfcF1ZDQ=="], 2060 2061 2062 + "@wispplace/bun-firehose/@types/bun/bun-types": ["bun-types@1.3.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg=="], 2063 + 2061 2064 "@wispplace/lexicons/@atproto/lexicon/multiformats": ["multiformats@9.9.0", "", {}, "sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg=="], 2062 2065 2063 2066 "@wispplace/main-app/@atproto/api/@atproto/lexicon": ["@atproto/lexicon@0.5.2", "", { "dependencies": { "@atproto/common-web": "^0.4.4", "@atproto/syntax": "^0.4.1", "iso-datestring-validator": "^2.2.2", "multiformats": "^9.9.0", "zod": "^3.23.8" } }, "sha512-lRmJgMA8f5j7VB5Iu5cp188ald5FuI4FlmZ7nn6EBrk1dgOstWVrI5Ft6K3z2vjyLZRG6nzknlsw+tDP63p7bQ=="], 2064 2067 2065 2068 "@wispplace/main-app/@atproto/api/multiformats": ["multiformats@9.9.0", "", {}, "sha512-HoMUjhH9T8DDBNT+6xzkrd9ga/XiBI4xLr58LJACwK6G3HTOPeMz4nB4KJs33L2BelrIJa7P0VuNaVF3hMYfjg=="], 2069 + 2070 + "@wispplace/tiered-storage/@types/bun/bun-types": 
["bun-types@1.3.9", "", { "dependencies": { "@types/node": "*" } }, "sha512-+UBWWOakIP4Tswh0Bt0QD0alpTY8cb5hvgiYeWCMet9YukHbzuruIEeXC2D7nMJPB12kbh8C7XJykSexEqGKJg=="], 2066 2071 2067 2072 "accepts/mime-types/mime-db": ["mime-db@1.52.0", "", {}, "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg=="], 2068 2073