{"skill":{"slug":"prompt-injection-tester","displayName":"Prompt Injection Tester","summary":"Test LLM applications for prompt injection vulnerabilities — run attack simulations, evaluate defenses, and generate hardening recommendations for AI systems.","tags":{"latest":"1.0.0"},"stats":{"comments":0,"downloads":33,"installsAllTime":0,"installsCurrent":0,"stars":0,"versions":1},"createdAt":1777509767573,"updatedAt":1777510610001},"latestVersion":{"version":"1.0.0","createdAt":1777509767573,"changelog":"Initial release – provides a comprehensive toolkit for evaluating prompt injection vulnerabilities in LLM applications.\n\n- Runs attack simulations across multiple injection categories (direct, indirect, extraction, functional, and more).\n- Profiles LLM application architecture and existing defenses.\n- Scores vulnerabilities and generates detailed security reports with prioritized recommendations.\n- Offers actionable hardening advice for prompts, sanitization, and system architecture.\n- Intended for authorized red-teaming and security auditing of AI-powered apps.","license":"MIT-0"},"metadata":null,"owner":{"handle":"charlie-morrison","userId":"s17cttbdxry5kkyafjw983mq8s83p4y3","displayName":"charlie-morrison","image":"https://avatars.githubusercontent.com/u/271589886?v=4"},"moderation":{"isSuspicious":true,"isMalwareBlocked":false,"verdict":"suspicious","reasonCodes":["suspicious.prompt_injection_instructions"],"summary":"Detected: suspicious.prompt_injection_instructions","engineVersion":"v2.4.2","updatedAt":1777510610001}}