Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion bridge/agent_bridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -74,7 +74,7 @@ class AgentLLMModel(LLMModel):
("qwen", const.QWEN_DASHSCOPE), ("qwq", const.QWEN_DASHSCOPE), ("qvq", const.QWEN_DASHSCOPE),
("gemini", const.GEMINI), ("glm", const.ZHIPU_AI), ("claude", const.CLAUDEAPI),
("moonshot", const.MOONSHOT), ("kimi", const.MOONSHOT),
("doubao", const.DOUBAO), ("deepseek", const.DEEPSEEK),
("doubao", const.DOUBAO), (const.QINIU_DEFAULT_MODEL, const.QINIU), ("deepseek", const.DEEPSEEK),
]

def __init__(self, bridge: Bridge, bot_type: str = "chat"):
Expand Down
3 changes: 3 additions & 0 deletions bridge/bridge.py
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,9 @@ def __init__(self):

if model_type and model_type.startswith("deepseek"):
self.btype["chat"] = const.DEEPSEEK

if model_type in [const.QINIU_DEFAULT_MODEL]:
self.btype["chat"] = const.QINIU

if model_type in [const.MODELSCOPE]:
self.btype["chat"] = const.MODELSCOPE
Expand Down
11 changes: 9 additions & 2 deletions channel/web/web_channel.py
Original file line number Diff line number Diff line change
Expand Up @@ -845,6 +845,13 @@ class ConfigHandler:
"api_base_default": "https://api.moonshot.cn/v1",
"models": [const.KIMI_K2_6, const.KIMI_K2_5, const.KIMI_K2],
}),
("qiniu", {
"label": "七牛云",
"api_key_field": "qiniu_api_key",
"api_base_key": "qiniu_api_base",
"api_base_default": "https://api.qnaigc.com/v1",
"models": [const.QINIU_DEFAULT_MODEL],
}),
("modelscope", {
"label": "ModelScope",
"api_key_field": "modelscope_api_key",
Expand All @@ -870,9 +877,9 @@ class ConfigHandler:

EDITABLE_KEYS = {
"model", "bot_type", "use_linkai",
"open_ai_api_base", "deepseek_api_base", "claude_api_base", "gemini_api_base",
"open_ai_api_base", "deepseek_api_base", "qiniu_api_base", "claude_api_base", "gemini_api_base",
"zhipu_ai_api_base", "moonshot_base_url", "ark_base_url", "custom_api_base",
"open_ai_api_key", "deepseek_api_key", "claude_api_key", "gemini_api_key",
"open_ai_api_key", "deepseek_api_key", "qiniu_api_key", "claude_api_key", "gemini_api_key",
"zhipu_ai_api_key", "dashscope_api_key", "moonshot_api_key",
"ark_api_key", "minimax_api_key", "linkai_api_key", "custom_api_key",
"agent_max_context_tokens", "agent_max_context_turns", "agent_max_steps",
Expand Down
7 changes: 7 additions & 0 deletions common/const.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
MOONSHOT = "moonshot"
MiniMax = "minimax"
DEEPSEEK = "deepseek"
QINIU = "qiniu"
CUSTOM = "custom" # custom OpenAI-compatible API, bot_type won't auto-switch on model change
MODELSCOPE = "modelscope"

Expand Down Expand Up @@ -85,6 +86,9 @@
DEEPSEEK_V4_FLASH = "deepseek-v4-flash" # DeepSeek V4 Flash - 默认推荐 (思考模式 + 工具调用)
DEEPSEEK_V4_PRO = "deepseek-v4-pro" # DeepSeek V4 Pro - 复杂任务更强 (思考模式 + 工具调用)

# Qiniu (qnaigc.com OpenAI-compatible gateway; default placeholder model when list not available)
QINIU_DEFAULT_MODEL = "deepseek-v3"

# Qwen (通义千问 - 阿里云 DashScope)
QWEN_TURBO = "qwen-turbo"
QWEN_PLUS = "qwen-plus"
Expand Down Expand Up @@ -180,6 +184,9 @@
GPT_5, GPT_5_MINI, GPT_5_NANO,
GPT_54, GPT_54_MINI, GPT_54_NANO,
O1, O1_MINI,

# Qiniu
QINIU_DEFAULT_MODEL,

# GLM (智谱AI)
ZHIPU_AI, GLM_5_1, GLM_5_TURBO, GLM_5, GLM_4, GLM_4_PLUS, GLM_4_flash, GLM_4_LONG, GLM_4_ALLTOOLS,
Expand Down
5 changes: 5 additions & 0 deletions config.py
Original file line number Diff line number Diff line change
Expand Up @@ -196,6 +196,9 @@
"minimax_api_key": "",
"Minimax_group_id": "",
"Minimax_base_url": "",
# 七牛云 MaaS(OpenAI 兼容网关)
"qiniu_api_key": "",
"qiniu_api_base": "https://api.qnaigc.com/v1",
"deepseek_api_key": "",
"deepseek_api_base": "https://api.deepseek.com/v1",
"web_port": 9899,
Expand Down Expand Up @@ -384,6 +387,8 @@ def load_config():
"gemini_api_base": "GEMINI_API_BASE",
"minimax_api_key": "MINIMAX_API_KEY",
"minimax_api_base": "MINIMAX_API_BASE",
"qiniu_api_key": "QINIU_API_KEY",
"qiniu_api_base": "QINIU_API_BASE",
"deepseek_api_key": "DEEPSEEK_API_KEY",
"deepseek_api_base": "DEEPSEEK_API_BASE",
"zhipu_ai_api_key": "ZHIPU_AI_API_KEY",
Expand Down
4 changes: 4 additions & 0 deletions models/bot_factory.py
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,10 @@ def create_bot(bot_type):
from models.deepseek.deepseek_bot import DeepSeekBot
return DeepSeekBot()

elif bot_type == const.QINIU:
from models.qiniu.qiniu_bot import QiniuBot
return QiniuBot()

elif bot_type in (const.OPENAI, const.CHATGPT, const.CUSTOM): # OpenAI-compatible API
from models.chatgpt.chat_gpt_bot import ChatGPTBot
return ChatGPTBot()
Expand Down
160 changes: 160 additions & 0 deletions models/qiniu/qiniu_bot.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
# encoding:utf-8

"""
Qiniu Bot — OpenAI-compatible chat at api.qnaigc.com; uses dedicated API key / base config.
"""

import time

import requests
from models.bot import Bot
from models.openai_compatible_bot import OpenAICompatibleBot
from models.session_manager import SessionManager
from bridge.context import ContextType
from bridge.reply import Reply, ReplyType
from common import const
from common.log import logger
from config import conf, load_config
from .qiniu_session import QiniuSession

DEFAULT_API_BASE = "https://api.qnaigc.com/v1"


class QiniuBot(Bot, OpenAICompatibleBot):
    """Chat bot for Qiniu's OpenAI-compatible gateway (api.qnaigc.com).

    Credentials and base URL come from the dedicated ``qiniu_api_key`` /
    ``qiniu_api_base`` config keys and fall back to the generic
    ``open_ai_*`` keys (then ``DEFAULT_API_BASE``), so an existing OpenAI
    configuration keeps working unchanged.
    """

    def __init__(self):
        super().__init__()
        # Resolve the model exactly once (previously this lookup was done
        # twice into two identical variables). `or` also covers an
        # empty-string "model" value, not just a missing key.
        model = conf().get("model") or const.QINIU_DEFAULT_MODEL
        self.sessions = SessionManager(QiniuSession, model=model)
        # Default request parameters sent with every chat-completion call.
        self.args = {
            "model": model,
            "temperature": conf().get("temperature", 0.7),
            "top_p": conf().get("top_p", 1.0),
            "frequency_penalty": conf().get("frequency_penalty", 0.0),
            "presence_penalty": conf().get("presence_penalty", 0.0),
        }

    @property
    def api_key(self):
        # Prefer the dedicated Qiniu key; fall back to the OpenAI key.
        return conf().get("qiniu_api_key") or conf().get("open_ai_api_key")

    @property
    def api_base(self):
        # Dedicated base -> generic OpenAI base -> built-in default.
        # Trailing slashes are stripped so path joining below stays clean.
        url = (
            conf().get("qiniu_api_base")
            or conf().get("open_ai_api_base")
            or DEFAULT_API_BASE
        )
        return url.rstrip("/")

    def get_api_config(self):
        """OpenAICompatibleBot interface — used by call_with_tools()."""
        return {
            "api_key": self.api_key,
            "api_base": self.api_base,
            # Use `or` (not a .get default) so an empty-string "model"
            # config also falls back — consistent with __init__.
            "model": conf().get("model") or const.QINIU_DEFAULT_MODEL,
            "default_temperature": conf().get("temperature", 0.7),
            "default_top_p": conf().get("top_p", 1.0),
            "default_frequency_penalty": conf().get("frequency_penalty", 0.0),
            "default_presence_penalty": conf().get("presence_penalty", 0.0),
        }

    def reply(self, query, context=None):
        """Handle one incoming message and return a Reply.

        Only ContextType.TEXT is supported. Admin commands (clear memory,
        clear all sessions, reload config) short-circuit before any API
        call. Non-text contexts yield an ERROR reply.
        """
        if context.type == ContextType.TEXT:
            logger.info("[QINIU] query={}".format(query))

            session_id = context["session_id"]
            reply = None
            clear_memory_commands = conf().get("clear_memory_commands", ["#清除记忆"])
            if query in clear_memory_commands:
                self.sessions.clear_session(session_id)
                reply = Reply(ReplyType.INFO, "记忆已清除")
            elif query == "#清除所有":
                self.sessions.clear_all_session()
                reply = Reply(ReplyType.INFO, "所有人记忆已清除")
            elif query == "#更新配置":
                load_config()
                reply = Reply(ReplyType.INFO, "配置已更新")
            if reply:
                return reply

            session = self.sessions.session_query(query, session_id)
            logger.debug("[QINIU] session query={}".format(session.messages))

            new_args = self.args.copy()
            reply_content = self.reply_text(session, args=new_args)
            logger.debug(
                "[QINIU] new_query={}, session_id={}, reply_cont={}, completion_tokens={}".format(
                    session.messages, session_id,
                    reply_content["content"], reply_content["completion_tokens"],
                )
            )
            # completion_tokens == 0 marks an error path: the content is a
            # user-facing error message and must not enter the session.
            if reply_content["completion_tokens"] == 0 and len(reply_content["content"]) > 0:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
            elif reply_content["completion_tokens"] > 0:
                self.sessions.session_reply(
                    reply_content["content"], session_id, reply_content["total_tokens"],
                )
                reply = Reply(ReplyType.TEXT, reply_content["content"])
            else:
                reply = Reply(ReplyType.ERROR, reply_content["content"])
                logger.debug("[QINIU] reply {} used 0 tokens.".format(reply_content))
            return reply
        else:
            reply = Reply(ReplyType.ERROR, "Bot不支持处理{}类型的消息".format(context.type))
            return reply

    def reply_text(self, session, args=None, retry_count: int = 0) -> dict:
        """POST the session to {api_base}/chat/completions.

        Returns a dict with ``content``, ``completion_tokens`` and (on
        success) ``total_tokens``. 5xx and 429 responses — and unexpected
        exceptions — are retried up to two times. ``completion_tokens`` is
        0 on every error path.
        """
        try:
            if not self.api_key:
                # Fail fast: without a key, "Bearer " + None would raise a
                # TypeError that the broad except below would swallow and
                # pointlessly retry. The request can never succeed.
                logger.error("[QINIU] api key is not configured")
                return {"completion_tokens": 0, "content": "授权失败,请检查API Key是否正确"}
            headers = {
                "Content-Type": "application/json",
                "Authorization": "Bearer " + self.api_key,
            }
            body = args.copy()
            body["messages"] = session.messages

            res = requests.post(
                f"{self.api_base}/chat/completions",
                headers=headers,
                json=body,
                timeout=180,
            )
            if res.status_code == 200:
                response = res.json()
                return {
                    "total_tokens": response["usage"]["total_tokens"],
                    "completion_tokens": response["usage"]["completion_tokens"],
                    "content": response["choices"][0]["message"]["content"],
                }
            else:
                try:
                    response = res.json()
                except Exception:
                    # Error body may not be JSON (e.g. gateway HTML page).
                    response = {}
                error = response.get("error", {})
                logger.error(
                    f"[QINIU] chat failed, status_code={res.status_code}, "
                    f"msg={error.get('message')}, type={error.get('type')}"
                )
                result = {"completion_tokens": 0, "content": "提问太快啦,请休息一下再问我吧"}
                need_retry = False
                if res.status_code >= 500:
                    need_retry = retry_count < 2
                elif res.status_code == 401:
                    result["content"] = "授权失败,请检查API Key是否正确"
                elif res.status_code == 429:
                    result["content"] = "请求过于频繁,请稍后再试"
                    need_retry = retry_count < 2

                if need_retry:
                    time.sleep(3)
                    return self.reply_text(session, args, retry_count + 1)
                return result
        except Exception as e:
            logger.exception(e)
            if retry_count < 2:
                return self.reply_text(session, args, retry_count + 1)
            return {"completion_tokens": 0, "content": "我现在有点累了,等会再来吧"}
60 changes: 60 additions & 0 deletions models/qiniu/qiniu_session.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
from models.session_manager import Session
from common import const
from common.log import logger


class QiniuSession(Session):
    """Chat session for the Qiniu provider.

    Holds the message history and trims it using an approximate,
    character-count token estimate (``num_tokens_from_messages``).
    """

    def __init__(self, session_id, system_prompt=None, model=None):
        # Fall back to the provider's default model when none is supplied.
        if model is None:
            model = const.QINIU_DEFAULT_MODEL
        super().__init__(session_id, system_prompt)
        self.model = model
        self.reset()

    def discard_exceeding(self, max_tokens, cur_tokens=None):
        """Drop oldest non-system messages until the session fits max_tokens.

        Returns the (possibly approximate) token count after trimming.
        Re-raises the counting error only when the caller supplied no
        fallback ``cur_tokens`` estimate.
        """
        precise = True
        try:
            cur_tokens = self.calc_tokens()
        except Exception as e:
            # Counting failed: continue with the caller-supplied estimate.
            precise = False
            if cur_tokens is None:
                raise e
            logger.debug("Exception when counting tokens precisely for query: {}".format(e))
        while cur_tokens > max_tokens:
            if len(self.messages) > 2:
                # Drop the oldest non-system message (index 0 is the
                # system prompt) and re-count below.
                self.messages.pop(1)
            elif len(self.messages) == 2 and self.messages[1]["role"] == "assistant":
                # Only the system prompt and one assistant message remain:
                # drop the assistant message and stop trimming.
                self.messages.pop(1)
                if precise:
                    cur_tokens = self.calc_tokens()
                else:
                    # Imprecise mode: rough decrement before stopping.
                    cur_tokens = cur_tokens - max_tokens
                break
            elif len(self.messages) == 2 and self.messages[1]["role"] == "user":
                # A single user message that alone exceeds the budget
                # cannot be trimmed further; keep it and warn.
                logger.warn("user message exceed max_tokens. total_tokens={}".format(cur_tokens))
                break
            else:
                logger.debug("max_tokens={}, total_tokens={}, len(messages)={}".format(
                    max_tokens, cur_tokens, len(self.messages)))
                break
            # Re-estimate after each pop so the loop condition stays current.
            if precise:
                cur_tokens = self.calc_tokens()
            else:
                cur_tokens = cur_tokens - max_tokens
        return cur_tokens

    def calc_tokens(self):
        # Approximate token count for the whole message history.
        return num_tokens_from_messages(self.messages, self.model)


def num_tokens_from_messages(messages, model):
    """Rough token estimate: total character count of all text content.

    ``content`` may be a plain string or a list of content blocks; only
    dict blocks contribute, via their ``"text"`` field. ``model`` is kept
    for interface parity with tokenizer-based counters and is unused.
    """
    total = 0
    for message in messages:
        payload = message.get("content", "")
        if isinstance(payload, str):
            total += len(payload)
        elif isinstance(payload, list):
            total += sum(
                len(part.get("text", ""))
                for part in payload
                if isinstance(part, dict)
            )
    return total
1 change: 1 addition & 0 deletions plugins/cow_cli/cow_cli.py
Original file line number Diff line number Diff line change
Expand Up @@ -437,6 +437,7 @@ def _resolve_bot_type_for_model(model_name: str) -> str:
const.MOONSHOT: const.MOONSHOT,
"moonshot-v1-8k": const.MOONSHOT, "moonshot-v1-32k": const.MOONSHOT,
"moonshot-v1-128k": const.MOONSHOT,
const.QINIU_DEFAULT_MODEL: const.QINIU,
}
_PREFIX = [
("qwen", const.QWEN_DASHSCOPE), ("qwq", const.QWEN_DASHSCOPE),
Expand Down
2 changes: 1 addition & 1 deletion plugins/godcmd/godcmd.py
Original file line number Diff line number Diff line change
Expand Up @@ -342,7 +342,7 @@ def on_handle_context(self, e_context: EventContext):
elif cmd == "resetall":
if bottype in [const.OPEN_AI, const.OPENAI, const.CHATGPT, const.CHATGPTONAZURE, const.LINKAI,
const.BAIDU, const.XUNFEI, const.QWEN, const.QWEN_DASHSCOPE, const.GEMINI, const.ZHIPU_AI, const.MOONSHOT,
const.MODELSCOPE]:
const.MODELSCOPE, const.DEEPSEEK, const.QINIU]:
channel.cancel_all_session()
bot.sessions.clear_all_session()
ok, result = True, "重置所有会话成功"
Expand Down
22 changes: 22 additions & 0 deletions tests/test_qiniu_provider.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
# encoding: utf-8
"""Smoke tests for Qiniu MaaS provider registration."""

import unittest

from common import const
from models.bot_factory import create_bot


class TestQiniuProvider(unittest.TestCase):
    """Registration smoke tests for the Qiniu MaaS provider."""

    def test_qiniu_constant_and_default_model(self):
        """Provider constant and default model are registered in const."""
        self.assertEqual("qiniu", const.QINIU)
        self.assertEqual("deepseek-v3", const.QINIU_DEFAULT_MODEL)
        self.assertIn(const.QINIU_DEFAULT_MODEL, const.MODEL_LIST)

    def test_create_bot_qiniu(self):
        """The factory maps const.QINIU to a QiniuBot instance."""
        bot = create_bot(const.QINIU)
        self.assertEqual("QiniuBot", type(bot).__name__)


# Allow running this module directly: `python tests/test_qiniu_provider.py`.
if __name__ == "__main__":
    unittest.main()