# Cognitive Kernel-Pro - minimal configuration example
# Only the core models need to be configured to run

[ck.model]
# Main model configuration - required
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"

# Optional: model parameters
[ck.model.extract_body]
temperature = 0.6
max_tokens = 8192
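# Fields under extract_body are presumably passed through to the request body,
# so standard chat-completions parameters such as temperature and max_tokens go here.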

[web]
# Web agent configuration
max_steps = 20
use_multimodal = "auto" # off | yes | auto
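# "off" disables the multimodal model; "yes" always uses it; "auto" presumably
# switches to it only when the page content requires vision.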

[web.model]
# Web agent model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"
request_timeout = 600
max_retry_times = 5
max_token_num = 8192
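# request_timeout is presumably in seconds; max_retry_times is the retry count for
# failed calls, and max_token_num presumably caps the token budget for this model.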

[web.model.extract_body]
# Web model parameters
temperature = 0.0
top_p = 0.95
max_tokens = 8192

[web.model_multimodal]
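# Web agent multimodal (vision) model - presumably used when use_multimodal is active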
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen2.5-VL-72B-Instruct" # 或其他支持视觉的模型
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[web.model_multimodal.extract_body]
temperature = 0.0
top_p = 0.95
max_tokens = 8192

[file]
# File agent configuration
max_steps = 16
max_file_read_tokens = 3000
max_file_screenshots = 2
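# max_file_read_tokens caps how much file text is read per step; max_file_screenshots
# presumably limits the page screenshots passed to the multimodal model below.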

[file.model]
# File agent model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen3-235B-A22B-Instruct-2507"
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[file.model.extract_body]
temperature = 0.3
top_p = 0.95
max_tokens = 8192

[file.model_multimodal]
# File agent multimodal model configuration
call_target = "https://api-inference.modelscope.cn/v1/chat/completions"
model = "Qwen/Qwen2.5-VL-72B-Instruct" # 与web_agent相同的视觉模型
request_timeout = 600
max_retry_times = 5
max_token_num = 8192

[file.model_multimodal.extract_body]
temperature = 0.0
top_p = 0.95
max_tokens = 8192

# Optional: search backend
[search]
backend = "duckduckgo" # duckduckgo | google
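# Example (assumption, not verified against the project docs): switch to Google search.
# Depending on the backend, additional credentials may be required.
# backend = "google"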