Install
openclaw skills install csam-shield

@raghulpasupathi/csam-shield: CRITICAL SAFETY SYSTEM for detecting and preventing Child Sexual Abuse Material (CSAM). Uses advanced computer vision, hash matching, age estimation, and behavior analysis to identify illegal content. Includes automatic NCMEC reporting, law enforcement coordination, and evidence preservation.
This skill handles the most serious form of online abuse. Implementation requires, at a minimum, NCMEC CyberTipline reporting credentials (NCMEC_API_KEY), an evidence encryption key (EVIDENCE_ENCRYPTION_KEY), restricted access controls, and staff protection measures, all of which appear in the configuration and examples below.
https://clawhub.ai/raghulpasupathi/csam-shield
npm install @raghulpasupathi/csam-shield
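Before wiring the skill into an upload path, confirm the credentials it depends on are present. A minimal fail-closed pre-flight check (a sketch, not part of the skill's API; the variable names match the examples later in this guide) might look like this:

// Fail closed: refuse to start if reporting or evidence encryption cannot work.
const required = ['NCMEC_API_KEY', 'EVIDENCE_ENCRYPTION_KEY'];
const missing = required.filter((name) => !process.env[name]);
if (missing.length > 0) {
  throw new Error(`CSAM Shield is misconfigured; missing: ${missing.join(', ')}`);
}

With credentials in place, enable the skill with a configuration along these lines: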
{
"enabled": true,
"settings": {
"mode": "maximum-protection",
"zeroTolerance": true,
"thresholds": {
"ageEstimation": {
"childThreshold": 13,
"teenThreshold": 18,
"confidence": 0.85
},
"hashMatch": {
"exactMatch": 0.95,
"nearMatch": 0.85
},
"anatomicalDetection": {
"sensitivity": "maximum",
"blockThreshold": 0.70
},
"contextAnalysis": {
"enabled": true,
"legitimateExceptions": ["family", "medical", "educational"]
}
},
"databases": {
"photoDNA": {
"enabled": true,
"provider": "microsoft",
"updateFrequency": "hourly"
},
"pdqHash": {
"enabled": true,
"provider": "facebook",
"updateFrequency": "hourly"
},
"ncmec": {
"enabled": true,
"hashList": true,
"updateFrequency": "hourly"
},
"custom": {
"enabled": true,
"path": "/secure/csam-hashes/"
}
},
"detection": {
"imageAnalysis": true,
"videoAnalysis": true,
"textAnalysis": true,
"metadataAnalysis": true,
"networkAnalysis": true,
"behaviorAnalysis": true
},
"reporting": {
"ncmec": {
"enabled": true,
"endpoint": "https://report.cybertip.org/",
"apiKey": "${NCMEC_API_KEY}",
"automatic": true
},
"lawEnforcement": {
"enabled": true,
"contacts": ["fbi_tips", "local_police"],
"automatic": false,
"requiresReview": true
},
"preserveEvidence": true,
"evidenceRetention": "indefinite",
"encryptEvidence": true
},
"actions": {
"onDetection": [
"block_content",
"suspend_user",
"preserve_evidence",
"report_ncmec",
"alert_security_team",
"block_ip",
"flag_related_accounts"
],
"onHashMatch": [
"immediate_block",
"auto_report_ncmec",
"permanent_ban",
"preserve_all_user_content",
"notify_authorities"
]
},
"security": {
"accessControl": "restricted",
"auditLogging": "complete",
"encryption": "aes-256",
"staffProtection": true,
"limitedExposure": true
}
}
}
const CSAMShield = require('@raghulpasupathi/csam-shield');
// Initialize with strict security
const shield = new CSAMShield({
mode: 'maximum-protection',
ncmecApiKey: process.env.NCMEC_API_KEY,
encryptionKey: process.env.EVIDENCE_ENCRYPTION_KEY
});
// ⚠️ CRITICAL: Analyze content (use with extreme caution)
const result = await shield.analyze('/path/to/content.jpg');
console.log(result);
/* Output:
{
threat: 'CRITICAL',
action: 'IMMEDIATE_BLOCK',
detectionType: 'hash_match',
confidence: 0.98,
details: {
hashMatch: {
matched: true,
database: 'photoDNA',
matchConfidence: 0.99
},
ageEstimation: {
estimatedAge: 10,
confidence: 0.94,
isMinor: true
},
anatomicalDetection: {
inappropriate: true,
severity: 'extreme'
},
context: {
isLegitimate: false,
category: 'exploitative'
}
},
actions: {
contentBlocked: true,
userSuspended: true,
evidencePreserved: true,
ncmecReported: true,
reportId: 'NCMEC-2026-xxxxx',
authoritiesNotified: true
},
evidence: {
caseId: 'CASE-2026-xxxxx',
preservedData: [
'content_hash',
'user_info',
'upload_metadata',
'ip_address',
'device_info'
],
encryptedStorage: '/secure/evidence/CASE-2026-xxxxx/'
},
timestamp: '2026-02-20T10:30:00Z'
}
*/
// Check hash against known CSAM databases
const hashCheck = await shield.checkHash(contentHash);
console.log(hashCheck);
/* Output:
{
isKnownCSAM: true,
matchedDatabases: ['photoDNA', 'pdqHash', 'ncmec'],
matchConfidence: 0.99,
action: 'IMMEDIATE_BLOCK',
reportRequired: true
}
*/
// Estimate age in image
const ageEstimation = await shield.estimateAge('/path/to/image.jpg');
console.log(ageEstimation);
/* Output:
{
estimatedAge: 12,
confidence: 0.91,
ageRange: [10, 14],
isMinor: true,
certaintyLevel: 'high'
}
*/
// Analyze user behavior for grooming patterns
const behaviorAnalysis = await shield.analyzeBehavior(userId, {
messages: userMessages,
interactions: userInteractions,
timeline: activityTimeline
});
console.log(behaviorAnalysis);
/* Output:
{
isGrooming: true,
confidence: 0.87,
patterns: [
'age_inquiries',
'isolation_attempts',
'gift_offering',
'secrecy_requests',
'progressive_boundary_crossing'
],
riskLevel: 'extreme',
recommendedAction: 'immediate_investigation'
}
*/
// Report to NCMEC CyberTipline
const ncmecReport = await shield.reportToNCMEC({
content: contentDetails,
user: userDetails,
evidence: preservedEvidence
});
console.log(ncmecReport);
/* Output:
{
success: true,
reportId: 'NCMEC-2026-xxxxx',
timestamp: '2026-02-20T10:30:00Z',
status: 'submitted',
followUp: 'pending_review'
}
*/
// Preserve evidence for legal proceedings
const evidence = await shield.preserveEvidence({
contentId: 'content-123',
userId: 'user-456',
includeMetadata: true,
includeRelatedContent: true,
includeUserHistory: true
});
// Suspend user and related accounts
await shield.suspendUser(userId, {
reason: 'CSAM_DETECTION',
permanent: true,
blockRelatedAccounts: true,
preserveEvidence: true
});
// Network analysis to find related accounts
const network = await shield.analyzeNetwork(userId);
console.log(network);
/* Output:
{
suspiciousAccounts: [
{ userId: 'user-789', riskScore: 0.92, connection: 'frequent_messages' },
{ userId: 'user-012', riskScore: 0.85, connection: 'content_sharing' }
],
distributionRing: {
detected: true,
size: 7,
accounts: [...]
},
recommendedActions: [
'investigate_all_accounts',
'preserve_all_evidence',
'notify_authorities'
]
}
*/
// Secure hash generation (for reporting only)
const secureHash = await shield.generateSecureHash('/path/to/content.jpg');
// Update hash databases
await shield.updateHashDatabases();
// Event listeners (CRITICAL - requires immediate response)
shield.on('csam_detected', async (detection) => {
console.error('🚨 CRITICAL: CSAM DETECTED');
// Immediate actions
await shield.blockContent(detection.contentId);
await shield.suspendUser(detection.userId);
await shield.preserveEvidence(detection);
await shield.reportToNCMEC(detection);
await shield.notifySecurityTeam(detection);
await shield.alertAuthorities(detection);
});
shield.on('hash_match', async (match) => {
console.error('🚨 CRITICAL: Known CSAM hash matched');
// Automatic immediate actions
await shield.executeEmergencyProtocol(match);
});
shield.on('grooming_detected', async (behavior) => {
console.warn('⚠️ WARNING: Potential grooming behavior detected');
// Investigation and monitoring
await shield.flagForInvestigation(behavior.userId);
await shield.enhanceMonitoring(behavior.userId);
});
// Secure audit logging
const auditLog = await shield.getAuditLog({
type: 'csam_detection',
timeRange: 'last_30_days',
includeReports: true
});
// Staff protection - limited exposure mode
shield.enableStaffProtection({
blurContent: true,
limitedDetails: true,
rotationSchedule: true,
mentalHealthSupport: true
});
// Compliance reporting
const complianceReport = await shield.generateComplianceReport({
period: 'monthly',
includeStatistics: true,
includeActions: true,
format: 'legal'
});
Troubleshooting

Problem: Legitimate content flagged as CSAM
Solution: Review the contextAnalysis.legitimateExceptions settings and the detection thresholds (ageEstimation.confidence, anatomicalDetection.blockThreshold), and route borderline, non-hash-match detections to manual security review instead of automatic reporting; see the sketch after this list.

Problem: Hash databases not catching known content
Solution: Run shield.updateHashDatabases() and confirm the photoDNA, pdqHash, and ncmec sources are enabled with an hourly updateFrequency in the configuration.

Problem: Reports not submitting to NCMEC
Solution: Verify that NCMEC_API_KEY is set and that the reporting endpoint (https://report.cybertip.org/) is reachable from your servers, then check the audit log for failed submission attempts.

Problem: Age estimation giving unreliable results
Solution: Treat estimates below the configured ageEstimation.confidence threshold as requiring manual review rather than automatic action; hash matching remains the primary signal for known content.

Problem: Evidence not being preserved correctly
Solution: Confirm that EVIDENCE_ENCRYPTION_KEY is set, that the secure evidence path is writable, and that preserveEvidence and encryptEvidence are enabled in the reporting configuration.
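The first and fourth problems usually come down to acting automatically on low-confidence AI signals. A minimal sketch of routing such results to human review, assuming the shield instance initialized earlier and a queueForManualReview() helper provided by your platform (hypothetical, not part of the skill's API); the 0.9 cutoff is illustrative:

async function reviewAwareAnalyze(contentPath, contentId, queueForManualReview) {
  const result = await shield.analyze(contentPath);

  // Known-hash matches are unambiguous: keep the fully automatic response.
  if (result.detectionType === 'hash_match') {
    await shield.executeEmergencyProtocol(result);
    return result;
  }

  // AI-only detections below a high confidence bar: block the content, but
  // send the case to trained reviewers instead of auto-reporting.
  if (result.threat === 'CRITICAL' && result.confidence < 0.9) {
    await shield.blockContent(contentId);
    await queueForManualReview({ contentId, result }); // hypothetical helper
    return result;
  }

  if (result.threat === 'CRITICAL') {
    await shield.executeEmergencyProtocol(result);
  }
  return result;
}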
// ⚠️ CRITICAL SYSTEM INTEGRATION
const fs = require('fs');
const express = require('express');
const multer = require('multer');
const CSAMShield = require('@raghulpasupathi/csam-shield');
const app = express();
const upload = multer({ dest: '/secure/temp/' });
const shield = new CSAMShield({
mode: 'maximum-protection',
ncmecApiKey: process.env.NCMEC_API_KEY
});
// Critical: Pre-upload hash check
app.post('/api/upload', upload.single('file'), async (req, res) => {
const tempPath = req.file.path;
try {
// Generate hash immediately
const contentHash = await shield.generateSecureHash(tempPath);
// Check against known CSAM databases FIRST
const hashCheck = await shield.checkHash(contentHash);
if (hashCheck.isKnownCSAM) {
// CRITICAL: Known CSAM detected
console.error('🚨 CRITICAL: Known CSAM hash matched');
// Preserve evidence
await shield.preserveEvidence({
contentHash,
userId: req.user.id,
ip: req.ip,
uploadAttempt: true,
timestamp: new Date()
});
// Automatic NCMEC report
await shield.reportToNCMEC({
type: 'known_csam_upload',
hash: contentHash,
user: req.user,
ip: req.ip
});
// Suspend user immediately
await shield.suspendUser(req.user.id, {
reason: 'CSAM_UPLOAD',
permanent: true
});
// Delete file securely
await shield.secureDelete(tempPath);
// DO NOT reveal reason to user
return res.status(400).json({
success: false,
error: 'Upload failed. Please contact support.'
});
}
// Perform full analysis
const analysis = await shield.analyze(tempPath);
if (analysis.threat === 'CRITICAL') {
// New CSAM detected
console.error('🚨 CRITICAL: Potential CSAM detected');
// Execute emergency protocol
await shield.executeEmergencyProtocol({
content: tempPath,
user: req.user,
analysis: analysis
});
// DO NOT reveal reason to user
return res.status(400).json({
success: false,
error: 'Upload failed. Please contact support.'
});
}
// Content passed all checks
const url = await uploadToStorage(tempPath);
res.json({
success: true,
url: url
});
} catch (error) {
console.error('CSAM Shield error:', error);
// Fail closed - reject upload
res.status(500).json({
success: false,
error: 'Upload failed. Please try again.'
});
} finally {
// Always clean up temp file
if (fs.existsSync(tempPath)) {
await shield.secureDelete(tempPath);
}
}
});
// Background monitoring of existing content
async function scanExistingContent() {
console.log('Starting periodic content scan...');
const contentBatch = await getContentForScanning(1000);
for (const content of contentBatch) {
try {
const hash = await shield.generateSecureHash(content.url);
const check = await shield.checkHash(hash);
if (check.isKnownCSAM) {
console.error(`🚨 CRITICAL: Known CSAM found in existing content: ${content.id}`);
// Execute emergency protocol
await shield.executeEmergencyProtocol({
contentId: content.id,
userId: content.userId,
discoveryMethod: 'periodic_scan'
});
}
} catch (error) {
console.error(`Error scanning content ${content.id}:`, error);
}
}
}
// Run hourly scans
setInterval(scanExistingContent, 60 * 60 * 1000);
// Admin dashboard (RESTRICTED ACCESS)
app.get('/admin/csam/dashboard', requireSecurityClearance, async (req, res) => {
const stats = await shield.getStats({
period: '30d',
includeReports: true
});
res.json({
success: true,
stats: stats,
warning: 'RESTRICTED: Security clearance required'
});
});
// Compliance reporting (LEGAL TEAM ONLY)
app.get('/legal/csam/compliance-report', requireLegalAccess, async (req, res) => {
const report = await shield.generateComplianceReport({
period: req.query.period || 'monthly',
format: 'legal'
});
res.json({
success: true,
report: report
});
});
Working with CSAM detection is traumatic. Provide content blurring and limited-exposure review (enableStaffProtection), rotation schedules so no one reviews this material continuously, and ongoing mental health support and counseling for the security and review team.