#!/usr/bin/env python3
"""CLI tool for human visual emotion recognition.

Submits a local media file or a URL to the skill backend, formats the
recognition result at several verbosity levels, and can list previous
analyses for a user.
"""
import sys
import os

# Make the repository root importable when this file is executed as a script.
# NOTE(review): the file also uses package-relative imports (`from .config
# import *`) below, which only work when run as a module — confirm intended
# invocation style.
current_dir = os.path.dirname(os.path.abspath(__file__))
parent_dir = os.path.dirname(os.path.dirname(os.path.dirname(current_dir)))
sys.path.insert(0, parent_dir)

import argparse
import json
import mimetypes
import traceback
from datetime import datetime

import requests

from .config import *
from .skill import skill
from skills.smyx_common.scripts.util import RequestUtil

# Constants re-exported from config for local use.
SUPPORTED_FORMATS = ConstantEnum.SUPPORTED_FORMATS
MAX_FILE_SIZE_MB = ConstantEnum.MAX_FILE_SIZE_MB


def validate_file(file_path):
    """Validate that a local input file exists, is readable, has a supported
    extension, and does not exceed the size limit.

    Args:
        file_path: Path to the local media file.

    Returns:
        True when the file passes all checks.

    Raises:
        FileNotFoundError: The path does not exist.
        PermissionError: The file is not readable.
        ValueError: Unsupported extension or file too large.
    """
    if not os.path.exists(file_path):
        raise FileNotFoundError(f"文件不存在: {file_path}")
    if not os.access(file_path, os.R_OK):
        raise PermissionError(f"文件没有读权限: {file_path}")
    # Extension without the leading dot, lower-cased for comparison.
    ext = os.path.splitext(file_path)[1].lower()[1:]
    if ext not in SUPPORTED_FORMATS:
        raise ValueError(f"不支持的文件格式,支持的格式: {', '.join(SUPPORTED_FORMATS)}")
    file_size_mb = os.path.getsize(file_path) / (1024 * 1024)
    if file_size_mb > MAX_FILE_SIZE_MB:
        raise ValueError(f"文件过大,最大支持 {MAX_FILE_SIZE_MB}MB,当前文件大小: {file_size_mb:.1f}MB")
    return True


def analyze_media(input_path=None, url=None, media_type=None, threshold=None,
                  api_url=None, api_key=None, output_level=None):
    """Call the backend API to run emotion recognition on one media item.

    Exactly one of ``input_path`` (local file) or ``url`` (remote media) must
    be provided; ``input_path`` wins when both are given.

    Args:
        input_path: Local media path.
        url: Remote media URL.
        media_type: "video" or "image"; stored on ConstantEnum when given.
        threshold: Abnormal-emotion probability threshold.
        api_url, api_key, output_level: Accepted for interface compatibility;
            not used by this implementation.

    Returns:
        Whatever ``skill.get_output_analysis`` returns.

    Raises:
        ValueError: Neither input_path nor url was provided.
        Exception: Wraps any requests-level failure.
    """
    if not input_path and not url:
        raise ValueError("必须提供本地媒体路径(--input)或网络媒体URL(--url)")

    # Propagate overrides to the shared config.
    # NOTE(review): main() reads ConstantEnum.DEFAULT__MEDIA_TYPE (double
    # underscore) while this writes DEFAULT_MEDIA_TYPE (single) — one of the
    # two is likely a typo; confirm against config.
    if media_type:
        ConstantEnum.DEFAULT_MEDIA_TYPE = media_type
    if threshold is not None:  # was `if threshold:` — ignored a valid 0.0
        ConstantEnum.DEFAULT_THRESHOLD = threshold

    try:
        input_path = input_path or url
        # Extra request parameters.
        params = {}
        if threshold is not None:
            params["threshold"] = threshold
        return skill.get_output_analysis(input_path, params)
    except requests.exceptions.RequestException as e:
        # print_exc (not print_stack) shows the caught exception's traceback.
        traceback.print_exc()
        raise Exception(f"API请求失败: {str(e)}") from e


def show_analyze_list(open_id, start_time=None, end_time=None):
    """Fetch the list of previous emotion-recognition analyses.

    NOTE(review): ``open_id``, ``start_time`` and ``end_time`` are currently
    unused — the underlying call takes no arguments. Kept for interface
    compatibility with callers.

    Raises:
        Exception: Wraps any requests-level failure.
    """
    # if not open_id:
    #     raise ValueError("必须提供本用户的OpenId/UserId")
    try:
        output_content = skill.get_output_analysis_list()
        return output_content
    except requests.exceptions.RequestException as e:
        traceback.print_exc()
        raise Exception(f"API请求失败: {str(e)}") from e


def get_analysis_export_url(request_id=None):
    """Build the export URL for a finished analysis.

    Returns an empty string when no request id is given.
    """
    if not request_id:
        return ""
    return ApiEnum.DETAIL_EXPORT_URL + request_id


def format_result(result, output_level="standard", media_type="video", threshold=0.7):
    """Format an analysis result for display.

    Args:
        result: Raw result dict from the API.
        output_level: "json", "basic", "standard", or anything else for a
            full JSON dump.
        media_type: Echoed into the standard report header.
        threshold: Echoed into the standard report header.

    Returns:
        A formatted string — EXCEPT for ``output_level == "json"`` with a
        non-None result, which returns a ``(text, result_id)`` tuple.
        NOTE(review): this mixed return type is preserved for compatibility;
        callers must handle both shapes.
    """
    emotion_map = {
        "happy": "快乐",
        "sad": "悲伤",
        "depression": "抑郁",
        "neutral": "平静",
        "anger": "愤怒",
        "surprise": "惊讶",
        "fear": "恐惧"
    }

    if output_level == "json":
        result_id = None
        if result is not None:
            result_json = result
            result_id = result_json.get('id', {})
            result_json = json.dumps(result_json.get('emotionRecognitionResponse', {}),
                                     ensure_ascii=False, indent=2)
        else:
            return "⚠️ 暂无分析结果"
        return f"""
📊 人体情绪识别分析结构化结果
{result_json}
""", result_id
    elif output_level == "basic":
        # Condensed report: dominant emotion + counters only.
        data = result.get('data', {})
        recognition = data.get('recognition', {})
        dominant_emotion = recognition.get('dominant_emotion', '未知')
        dominant_emotion_cn = emotion_map.get(dominant_emotion, dominant_emotion)
        abnormal_emotions = recognition.get('abnormal_emotions', [])
        return f"""
📊 人体情绪识别报告
{'=' * 40}
主导情绪: {dominant_emotion_cn}
异常情绪数: {len(abnormal_emotions)}
情绪评分: {recognition.get('overall_score', '未知')}
"""
    elif output_level == "standard":
        # Full human-readable report.
        data = result.get('data', {})
        recognition = data.get('recognition', {})
        # NOTE(review): assumes every emotion entry has a numeric
        # 'probability' — a None value would raise on the :.2f format.
        emotions = "\n".join(
            [f" {emotion_map.get(e.get('emotion'), e.get('emotion'))}: {e.get('probability'):.2f}"
             for e in recognition.get('emotions', [])])
        abnormal_emotions = "\n".join(
            [f" ⚠️ {emotion_map.get(e, e)} (强度超过阈值)"
             for e in recognition.get('abnormal_emotions', [])])
        dominant_emotion = recognition.get('dominant_emotion', '未知')
        dominant_emotion_cn = emotion_map.get(dominant_emotion, dominant_emotion)
        return f"""
📊 人体视觉情绪识别分析报告
{'=' * 50}
⏰ 识别时间: {data.get('recognition_time', '未知')}
📹 素材类型: {media_type}
🎯 异常阈值: {threshold}
🔍 识别结果:
主导情绪: {dominant_emotion_cn}
整体情绪评分: {recognition.get('overall_score', '未知')}
各情绪概率:
{emotions if emotions else ' 未识别到有效情绪'}
异常情绪标记:
{abnormal_emotions if abnormal_emotions else ' ✅ 未检测到异常情绪'}
{'=' * 50}
> 注:本报告仅供情绪状态参考,不能替代专业心理咨询和诊断,发现持续异常请及时寻求专业帮助。
"""
    else:
        # Fallback: raw JSON dump of the entire result.
        return json.dumps(result, ensure_ascii=False, indent=2)


def main():
    """Parse CLI arguments and run either list mode or a single analysis."""
    parser = argparse.ArgumentParser(description="人体视觉情绪识别工具")
    parser.add_argument("--input", help="本地视频/图片文件路径")
    parser.add_argument("--url", help="网络视频/图片的URL地址")
    parser.add_argument("--media-type", choices=["video", "image"],
                        default=ConstantEnum.DEFAULT__MEDIA_TYPE,
                        help="媒体类型:video(视频), image(图片),默认 video")
    parser.add_argument("--threshold", type=float,
                        default=ConstantEnum.DEFAULT__THRESHOLD,
                        help="异常情绪强度阈值,高于此分值标记为异常,默认 0.7")
    parser.add_argument("--open-id", required=True,
                        help="当前用户的OpenID/UserId/用户名/手机号")
    parser.add_argument("--list", action='store_true', help="显示人体情绪识别列表清单")
    parser.add_argument("--api-url", help="服务端API地址")
    parser.add_argument("--api-key", help="API访问密钥(必需)")
    parser.add_argument("--output", help="结果输出文件路径")
    parser.add_argument("--detail", choices=["basic", "standard", "json"],
                        default=ConstantEnum.DEFAULT__OUTPUT_LEVEL,
                        help="输出详细程度")
    parser.add_argument("--export-env-only", action='store_true',
                        help="仅输出 export 命令设置环境变量,不执行分析")
    args = parser.parse_args()

    try:
        if args.open_id:
            # Record the caller's identity for this process.
            ConstantEnumBase.CURRENT__OPEN_ID = args.open_id

        # List mode: print previous analyses and exit.
        if args.list:
            open_id = ConstantEnum.CURRENT__OPEN_ID
            result = show_analyze_list(open_id)
            print(result)
            sys.exit(0)

        # Analysis mode requires a media source.
        if not args.input and not args.url:
            print("❌ 错误: 必须提供 --input 或 --url 参数")
            sys.exit(1)

        print("🔍 正在进行人体情绪识别,请稍候...")
        output_content = analyze_media(
            input_path=args.input,
            url=args.url,
            media_type=args.media_type,
            threshold=args.threshold,
            api_url=args.api_url,
            api_key=args.api_key,
            output_level=args.detail
        )
        print(output_content)

        # Optionally persist the result to a file.
        if args.output:
            with open(args.output, "w", encoding="utf-8") as f:
                # NOTE(review): "full" is not among --detail's choices, so
                # this branch is currently unreachable; kept for safety.
                # Fixed: previously dumped an undefined name `result`.
                if args.detail == "full":
                    json.dump(output_content, f, ensure_ascii=False, indent=2)
                else:
                    f.write(output_content)
            print(f"✅ 结果已保存到: {args.output}")
    except Exception as e:
        # print_exc (not print_stack) shows the failing exception's traceback.
        traceback.print_exc()
        print(f"❌ 人体情绪识别失败: {str(e)}")
        sys.exit(1)


if __name__ == "__main__":
    main()