diff --git a/.github/quickstarts/windows/config.example.go-cqhttp.cfg b/.github/quickstarts/windows/config.example.go-cqhttp.cfg
new file mode 100644
index 00000000..4b406215
--- /dev/null
+++ b/.github/quickstarts/windows/config.example.go-cqhttp.cfg
@@ -0,0 +1,28 @@
+# 这里是 ChatGPT for QQ 的所有配置文件
+# 请注意:以 "#" 开头的文本均为注释
+# 不会被程序读取
+# 如果你想要使用某个设置,请确保前面没有 "#" 号
+
+########################
+# 配置文件编写教程:
+# https://chatgpt-qq.lss233.com/
+########################
+[onebot]
+manager_qq = 请修改为机器人管理员的QQ号(你本人的 QQ 号)
+
+[openai]
+# 如果你想添加 Claude、Bing 等 AI,请阅读【教程】
+
+[[openai.accounts]]
+access_token = "这里填写你的 access_token(其他接入方式请看教程)"
+# 国内用户可能需要配置代理
+# proxy="http://127.0.0.1:7890"
+
+[presets]
+# 切换预设的命令: 加载预设 猫娘
+command = "加载预设 (\\w+)"
+
+[presets.keywords]
+# 预设关键词 <-> 实际文件
+"聊天" = "presets/issue402.txt"
+"猫娘" = "presets/catgirl.txt"
diff --git a/.github/quickstarts/windows/go-cqhttp/config.yml b/.github/quickstarts/windows/go-cqhttp/config.yml
new file mode 100644
index 00000000..26da0b8f
--- /dev/null
+++ b/.github/quickstarts/windows/go-cqhttp/config.yml
@@ -0,0 +1,107 @@
+# go-cqhttp 默认配置文件
+
+account: # 账号相关
+ uin: YOUR_BOT_QQ_HERE # QQ账号
+ password: '' # 密码为空时使用扫码登录
+ encrypt: false # 是否开启密码加密
+ status: 0 # 在线状态 请参考 https://docs.go-cqhttp.org/guide/config.html#在线状态
+ relogin: # 重连设置
+ delay: 3 # 首次重连延迟, 单位秒
+ interval: 3 # 重连间隔
+ max-times: 0 # 最大重连次数, 0为无限制
+
+ # 是否使用服务器下发的新地址进行重连
+ # 注意, 此设置可能导致在海外服务器上连接情况更差
+ use-sso-address: true
+ # 是否允许发送临时会话消息
+ allow-temp-session: false
+
+heartbeat:
+ # 心跳频率, 单位秒
+ # -1 为关闭心跳
+ interval: 5
+
+message:
+ # 上报数据类型
+ # 可选: string,array
+ post-format: string
+ # 是否忽略无效的CQ码, 如果为假将原样发送
+ ignore-invalid-cqcode: true
+ # 是否强制分片发送消息
+ # 分片发送将会带来更快的速度
+ # 但是兼容性会有些问题
+ force-fragment: false
+ # 是否将url分片发送
+ fix-url: false
+ # 下载图片等请求网络代理
+ proxy-rewrite: ''
+ # 是否上报自身消息
+ report-self-message: false
+ # 移除服务端的Reply附带的At
+ remove-reply-at: false
+ # 为Reply附加更多信息
+ extra-reply-data: false
+ # 跳过 Mime 扫描, 忽略错误数据
+ skip-mime-scan: false
+
+output:
+ # 日志等级 trace,debug,info,warn,error
+ log-level: warn
+ # 日志时效 单位天. 超过这个时间之前的日志将会被自动删除. 设置为 0 表示永久保留.
+ log-aging: 15
+ # 是否在每次启动时强制创建全新的文件储存日志. 为 false 的情况下将会在上次启动时创建的日志文件续写
+ log-force-new: true
+ # 是否启用日志颜色
+ log-colorful: true
+ # 是否启用 DEBUG
+ debug: false # 开启调试模式
+
+# 默认中间件锚点
+default-middlewares: &default
+ # 访问密钥, 强烈推荐在公网的服务器设置
+ access-token: ''
+ # 事件过滤器文件目录
+ filter: ''
+ # API限速设置
+ # 该设置为全局生效
+ # 原 cqhttp 虽然启用了 rate_limit 后缀, 但是基本没插件适配
+ # 目前该限速设置为令牌桶算法, 请参考:
+ # https://baike.baidu.com/item/%E4%BB%A4%E7%89%8C%E6%A1%B6%E7%AE%97%E6%B3%95/6597000?fr=aladdin
+ rate-limit:
+ enabled: false # 是否启用限速
+ frequency: 1 # 令牌回复频率, 单位秒
+ bucket: 1 # 令牌桶大小
+
+database: # 数据库相关设置
+ leveldb:
+ # 是否启用内置leveldb数据库
+ # 启用将会增加10-20MB的内存占用和一定的磁盘空间
+ # 关闭将无法使用 撤回 回复 get_msg 等上下文相关功能
+ enable: true
+ sqlite3:
+ # 是否启用内置sqlite3数据库
+ # 启用将会增加一定的内存占用和一定的磁盘空间
+ # 关闭将无法使用 撤回 回复 get_msg 等上下文相关功能
+ enable: false
+ cachettl: 3600000000000 # 1h
+
+# 连接服务列表
+servers:
+ # 添加方式,同一连接方式可添加多个,具体配置说明请查看文档
+ #- http: # http 通信
+ #- ws: # 正向 Websocket
+ #- ws-reverse: # 反向 Websocket
+ #- pprof: #性能分析服务器
+ # 反向WS设置
+ - ws-reverse:
+ # 反向WS Universal 地址
+ # 注意 设置了此项地址后下面两项将会被忽略
+ universal: ws://127.0.0.1:8566/ws
+ # 反向WS API 地址
+ api: ws://your_websocket_api.server
+ # 反向WS Event 地址
+ event: ws://your_websocket_event.server
+ # 重连间隔 单位毫秒
+ reconnect-interval: 3000
+ middlewares:
+ <<: *default # 引用默认中间件
diff --git a/.github/quickstarts/windows/go-cqhttp/device.json b/.github/quickstarts/windows/go-cqhttp/device.json
new file mode 100644
index 00000000..2c765537
--- /dev/null
+++ b/.github/quickstarts/windows/go-cqhttp/device.json
@@ -0,0 +1 @@
+{"display":"MIRAI.328126.001","product":"mirai","device":"mirai","board":"mirai","model":"mirai","finger_print":"mamoe/mirai/mirai:10/MIRAI.200122.001/9131310:user/release-keys","boot_id":"779066ef-140d-cf58-4b54-415b6db79071","proc_version":"Linux version 3.0.31-NAKD7gEP (android-build@xxx.xxx.xxx.xxx.com)","protocol":2,"imei":"427816956058829","brand":"mamoe","bootloader":"unknown","base_band":"","version":{"incremental":"5891938","release":"10","codename":"REL","sdk":29},"sim_info":"T-Mobile","os_type":"android","mac_address":"00:50:56:C0:00:08","ip_address":[10,0,1,3],"wifi_bssid":"00:50:56:C0:00:08","wifi_ssid":"\u003cunknown ssid\u003e","imsi_md5":"cc73b4a6b592dcebb09db63419673a69","android_id":"1e52fc9af1b185eb","apn":"wifi","vendor_name":"MIUI","vendor_os_name":"mirai"}
\ No newline at end of file
diff --git "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\210\235\345\247\213\345\214\226.cmd" "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\210\235\345\247\213\345\214\226.cmd"
new file mode 100644
index 00000000..16adb8f8
--- /dev/null
+++ "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\210\235\345\247\213\345\214\226.cmd"
@@ -0,0 +1,68 @@
+@ECHO OFF
+@CHCP 65001
+SET BASE_DIR=%cd%
+
+ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ECHO !!
+ECHO !! 如果您是新手,没有特殊需求。一路回车即可安装 !!!!
+ECHO !! 如果您在执行的过程出现错误,可以重新启动此脚本 !!!!
+ECHO !! 如果您遇到问题,可以提交 issue,或者在交流群询问 !!!!
+ECHO !!
+ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ECHO 当前的安装路径为 %BASE_DIR%
+ECHO 提示:请注意安装路径中不要有空格,否则可能会导致安装失败
+ECHO 提示:安装前先解压程序,不要在压缩包中直接运行
+pause
+
+cd "%BASE_DIR%\go-cqhttp"
+
+cd "%BASE_DIR%"
+ECHO 复制 配置信息...
+set /p "bot_qq=请输入机器人QQ号:"
+copy "%BASE_DIR%\files\go-cqhttp\config.yml" "%BASE_DIR%\go-cqhttp\"
+copy "%BASE_DIR%\files\go-cqhttp\device.json" "%BASE_DIR%\go-cqhttp\"
+setlocal enabledelayedexpansion
+set "file=%BASE_DIR%\go-cqhttp\config.yml"
+set "search=YOUR_BOT_QQ_HERE"
+set "replace=!bot_qq!"
+if exist "%file%" (
+ for /f "usebackq delims=" %%a in ("%file%") do (
+ set "line=%%a"
+ set "line=!line:%search%=%replace%!"
+ echo(!line!
+ )
+) > "%file%.new"
+move /y "%file%.new" "%file%" > nul
+ECHO go-cqhttp 初始化完毕。
+cd "%BASE_DIR%\chatgpt"
+
+ECHO 接下来开始初始化 ChatGPT
+ECHO 初始化 pip...
+set PYTHON_EXECUTABLE="%BASE_DIR%\python3.11\python.exe"
+cd "%BASE_DIR%\python3.11"
+@REM %PYTHON_EXECUTABLE% get-pip.py
+
+ECHO 安装依赖...
+cd "%BASE_DIR%\chatgpt"
+
+REM 如果下载的依赖不是最新版
+REM 请修改 https://mirrors.aliyun.com/pypi/simple/ 为 https://pypi.org/simple/
+REM 然后重新执行
+
+%PYTHON_EXECUTABLE% -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --extra-index-url https://pypi.org/simple/ -r requirements.txt
+
+ECHO 接下来将会打开 config.cfg,请修改里面的信息。
+
+cd "%BASE_DIR%\chatgpt"
+COPY "%BASE_DIR%\files\config.example.go-cqhttp.cfg" config.cfg
+notepad config.cfg
+cd "%BASE_DIR%"
+
+cls
+
+COPY "%BASE_DIR%\files\go-cqhttp\scripts\启动ChatGPT.cmd" .
+COPY "%BASE_DIR%\files\go-cqhttp\scripts\启动go-cqhttp.cmd" .
+ECHO "接下来请先执行 【启动ChatGPT.cmd】,启动程序。"
+ECHO "然后执行 【启动go-cqhttp.cmd】 并登录机器人 QQ,然后就可以开始使用了!"
+
+pause
diff --git "a/.github/quickstarts/windows/scripts/\345\220\257\345\212\250ChatGPT.cmd" "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd"
similarity index 60%
rename from ".github/quickstarts/windows/scripts/\345\220\257\345\212\250ChatGPT.cmd"
rename to ".github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd"
index b3316fad..82396b39 100644
--- "a/.github/quickstarts/windows/scripts/\345\220\257\345\212\250ChatGPT.cmd"
+++ "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250ChatGPT.cmd"
@@ -1,6 +1,10 @@
@ECHO OFF
+@CHCP 65001
+
+SET "PATH=%cd%\ffmpeg\bin;%PATH%"
+
TITLE [ChatGPT for QQ] ChatGPT 端正在运行...
-cd chatgpt && python3.11\python.exe bot.py
+cd chatgpt && ..\python3.11\python.exe bot.py
TITLE [ChatGPT for QQ] ChatGPT 端已停止运行
ECHO 程序已停止运行。
PAUSE
\ No newline at end of file
diff --git "a/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd" "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd"
new file mode 100644
index 00000000..0b552350
--- /dev/null
+++ "b/.github/quickstarts/windows/go-cqhttp/scripts/\345\220\257\345\212\250go-cqhttp.cmd"
@@ -0,0 +1,12 @@
+@ECHO OFF
+@CHCP 65001
+
+TITLE [ChatGPT for QQ] go-cqhttp 端正在运行...
+
+SET "PATH=%cd%\ffmpeg\bin;%PATH%"
+
+cd go-cqhttp && go-cqhttp -faststart
+TITLE [ChatGPT for QQ] go-cqhttp 端已停止运行
+
+echo 程序已停止运行
+PAUSE
\ No newline at end of file
diff --git "a/.github/quickstarts/windows/scripts/\345\210\235\345\247\213\345\214\226.cmd" "b/.github/quickstarts/windows/mirai/scripts/\345\210\235\345\247\213\345\214\226.cmd"
similarity index 90%
rename from ".github/quickstarts/windows/scripts/\345\210\235\345\247\213\345\214\226.cmd"
rename to ".github/quickstarts/windows/mirai/scripts/\345\210\235\345\247\213\345\214\226.cmd"
index dc42c3bc..c44a0547 100644
--- "a/.github/quickstarts/windows/scripts/\345\210\235\345\247\213\345\214\226.cmd"
+++ "b/.github/quickstarts/windows/mirai/scripts/\345\210\235\345\247\213\345\214\226.cmd"
@@ -1,70 +1,70 @@
-@ECHO OFF
-@CHCP 65001
-SET BASE_DIR=%cd%
-
-ECHO 正在初始化 Mirai...
-ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-ECHO !!
-ECHO !! 如果您是新手,没有特殊需求。一路回车即可安装 !!!!
-ECHO !! 如果您在执行的过程出现错误,可以重新启动此脚本 !!!!
-ECHO !! 如果您遇到问题,可以提交 issue,或者在交流群询问 !!!!
-ECHO !!
-ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
-ECHO 当前的安装路径为 %BASE_DIR%
-ECHO 提示:请注意安装路径中不要有空格,否则可能会导致安装失败
-ECHO 提示:安装前先解压程序,不要在压缩包中直接运行
-pause
-
-cd "%BASE_DIR%\mirai"
-@REM mcl-installer.exe
-
-@REM ECHO 安装 mirai-api-http 插件...
-@REM ECHO 插件介绍:https://github.com/project-mirai/mirai-api-http
-@REM cmd /c mcl.cmd --update-package net.mamoe:mirai-api-http --channel stable-v2 --type plugin
-@REM
-@REM ECHO 安装 mirai-device-generator 插件...
-@REM ECHO 插件介绍:https://github.com/cssxsh/mirai-device-generator
-@REM cmd /c mcl.cmd --update-package xyz.cssxsh.mirai:mirai-device-generator --channel stable --type plugin
-@REM
-@REM ECHO 安装 fix-protocol-version 插件...
-@REM ECHO 插件介绍:https://github.com/cssxsh/fix-protocol-version
-@REM cmd /c mcl.cmd --update-package xyz.cssxsh.mirai:fix-protocol-version --channel stable --type plugin
-
-cd "%BASE_DIR%"
-ECHO 复制 mirai-http-api 配置信息...
-mkdir "%BASE_DIR%\mirai\config\net.mamoe.mirai-api-http"
-copy "%BASE_DIR%\files\mirai-http-api-settings.yml" "%BASE_DIR%\mirai\config\net.mamoe.mirai-api-http\setting.yml"
-
-ECHO Mirai 初始化完毕。
-cd "%BASE_DIR%\chatgpt"
-
-ECHO 接下来开始初始化 ChatGPT
-ECHO 初始化 pip...
-set PYTHON_EXECUTABLE="%cd%\python3.11\python.exe"
-cd "%BASE_DIR%\chatgpt\python3.11"
-@REM %PYTHON_EXECUTABLE% get-pip.py
-
-ECHO 安装依赖...
-cd "%BASE_DIR%\chatgpt"
-
-REM 如果下载的依赖不是最新版
-REM 请修改 https://mirrors.aliyun.com/pypi/simple/ 为 https://pypi.org/simple/
-REM 然后重新执行
-
-%PYTHON_EXECUTABLE% -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --extra-index-url https://pypi.org/simple/ -r requirements.txt
-
-ECHO 接下来将会打开 config.cfg,请修改里面的信息。
-
-cd "%BASE_DIR%\chatgpt"
-COPY config.example.cfg config.cfg
-notepad config.cfg
-cd "%BASE_DIR%"
-
-cls
-
-COPY "%BASE_DIR%\files\scripts\启动ChatGPT.cmd" .
-COPY "%BASE_DIR%\files\scripts\启动Mirai.cmd" .
-ECHO "接下来请先执行 【启动ChatGPT.cmd】,启动程序。"
-ECHO "然后执行 【启动Mirai.cmd】 并登录机器人 QQ,然后就可以开始使用了!"
-
-pause
+@ECHO OFF
+@CHCP 65001
+SET BASE_DIR=%cd%
+
+ECHO 正在初始化 Mirai...
+ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ECHO !!
+ECHO !! 如果您是新手,没有特殊需求。一路回车即可安装 !!!!
+ECHO !! 如果您在执行的过程出现错误,可以重新启动此脚本 !!!!
+ECHO !! 如果您遇到问题,可以提交 issue,或者在交流群询问 !!!!
+ECHO !!
+ECHO !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
+ECHO 当前的安装路径为 %BASE_DIR%
+ECHO 提示:请注意安装路径中不要有空格,否则可能会导致安装失败
+ECHO 提示:安装前先解压程序,不要在压缩包中直接运行
+pause
+
+cd "%BASE_DIR%\mirai"
+@REM mcl-installer.exe
+
+@REM ECHO 安装 mirai-api-http 插件...
+@REM ECHO 插件介绍:https://github.com/project-mirai/mirai-api-http
+@REM cmd /c mcl.cmd --update-package net.mamoe:mirai-api-http --channel stable-v2 --type plugin
+@REM
+@REM ECHO 安装 mirai-device-generator 插件...
+@REM ECHO 插件介绍:https://github.com/cssxsh/mirai-device-generator
+@REM cmd /c mcl.cmd --update-package xyz.cssxsh.mirai:mirai-device-generator --channel stable --type plugin
+@REM
+@REM ECHO 安装 fix-protocol-version 插件...
+@REM ECHO 插件介绍:https://github.com/cssxsh/fix-protocol-version
+@REM cmd /c mcl.cmd --update-package xyz.cssxsh.mirai:fix-protocol-version --channel stable --type plugin
+
+cd "%BASE_DIR%"
+ECHO 复制 mirai-http-api 配置信息...
+mkdir "%BASE_DIR%\mirai\config\net.mamoe.mirai-api-http"
+copy "%BASE_DIR%\files\mirai-http-api-settings.yml" "%BASE_DIR%\mirai\config\net.mamoe.mirai-api-http\setting.yml"
+
+ECHO Mirai 初始化完毕。
+cd "%BASE_DIR%\chatgpt"
+
+ECHO 接下来开始初始化 ChatGPT
+ECHO 初始化 pip...
+set PYTHON_EXECUTABLE="%BASE_DIR%\python3.11\python.exe"
+cd "%BASE_DIR%\python3.11"
+@REM %PYTHON_EXECUTABLE% get-pip.py
+
+ECHO 安装依赖...
+cd "%BASE_DIR%\chatgpt"
+
+REM 如果下载的依赖不是最新版
+REM 请修改 https://mirrors.aliyun.com/pypi/simple/ 为 https://pypi.org/simple/
+REM 然后重新执行
+
+%PYTHON_EXECUTABLE% -m pip install -i https://mirrors.aliyun.com/pypi/simple/ --extra-index-url https://pypi.org/simple/ -r requirements.txt
+
+ECHO 接下来将会打开 config.cfg,请修改里面的信息。
+
+cd "%BASE_DIR%\chatgpt"
+COPY config.example.cfg config.cfg
+notepad config.cfg
+cd "%BASE_DIR%"
+
+cls
+
+COPY "%BASE_DIR%\files\mirai\scripts\启动ChatGPT.cmd" .
+COPY "%BASE_DIR%\files\mirai\scripts\启动Mirai.cmd" .
+ECHO "接下来请先执行 【启动ChatGPT.cmd】,启动程序。"
+ECHO "然后执行 【启动Mirai.cmd】 并登录机器人 QQ,然后就可以开始使用了!"
+
+pause
diff --git "a/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd" "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd"
new file mode 100644
index 00000000..4878dfa0
--- /dev/null
+++ "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250ChatGPT.cmd"
@@ -0,0 +1,11 @@
+@ECHO OFF
+@CHCP 65001
+
+TITLE [ChatGPT for QQ] ChatGPT 端正在运行...
+
+SET "PATH=%cd%\ffmpeg\bin;%PATH%"
+
+cd chatgpt && ..\python3.11\python.exe bot.py
+TITLE [ChatGPT for QQ] ChatGPT 端已停止运行
+ECHO 程序已停止运行。
+PAUSE
\ No newline at end of file
diff --git "a/.github/quickstarts/windows/scripts/\345\220\257\345\212\250Mirai.cmd" "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd"
similarity index 76%
rename from ".github/quickstarts/windows/scripts/\345\220\257\345\212\250Mirai.cmd"
rename to ".github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd"
index e09e7349..1c12d4ba 100644
--- "a/.github/quickstarts/windows/scripts/\345\220\257\345\212\250Mirai.cmd"
+++ "b/.github/quickstarts/windows/mirai/scripts/\345\220\257\345\212\250Mirai.cmd"
@@ -1,6 +1,10 @@
@ECHO OFF
+@CHCP 65001
+
TITLE [ChatGPT for QQ] Mirai 端正在运行...
+SET "PATH=%cd%\ffmpeg\bin;%PATH%"
+
cd mirai && mcl
TITLE [ChatGPT for QQ] Mirai 端已停止运行
diff --git a/.github/workflows/quickstart-windows-dev-gocqhttp.yml b/.github/workflows/quickstart-windows-dev-gocqhttp.yml
new file mode 100644
index 00000000..894f1b7d
--- /dev/null
+++ b/.github/workflows/quickstart-windows-dev-gocqhttp.yml
@@ -0,0 +1,79 @@
+name: Windows Quickstart Dev (go-cqhttp)
+
+on:
+ workflow_dispatch:
+ push:
+ branches:
+ - 'browser-version-dev'
+
+concurrency:
+ group: ${{ github.workflow }}-${{ github.event.pull_request.number || github.ref }}
+ cancel-in-progress: true
+
+jobs:
+ build:
+ name: Quickstart (GO-CQHTTP)
+ runs-on: Windows-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Generate files
+ run: |
+ mkdir C:/generated_files
+ mkdir C:/tmp_files
+ echo "Creating folders..."
+ cd C:/generated_files
+ echo "Downloading go-cqhttp..."
+ mkdir go-cqhttp
+ mkdir chatgpt
+ mkdir ffmpeg
+ Invoke-WebRequest -URI https://github.com/Mrs4s/go-cqhttp/releases/download/v1.0.1/go-cqhttp_windows_amd64.exe -OutFile C:/generated_files/go-cqhttp/go-cqhttp.exe
+
+ cp -r D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\* C:\generated_files\chatgpt\
+
+ echo "Downloading ffmpeg ..."
+ Invoke-WebRequest https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-6.0-full_build.7z -OutFile C:/tmp_files/ffmpeg.7z
+          7z x C:/tmp_files/ffmpeg.7z -r -oC:/generated_files/ffmpeg
+
+ echo "Downloading Python3.11 ..."
+ Invoke-WebRequest https://www.python.org/ftp/python/3.11.2/python-3.11.2-embed-amd64.zip -OutFile C:/tmp_files/python.zip
+ 7z x C:/tmp_files/python.zip -r -oC:/generated_files/python3.11
+
+ echo "Downloading get-pip.py ..."
+ Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/python3.11/get-pip.py
+ echo "import site" >> C:/generated_files/python3.11/python311._pth
+
+ echo "Moving files..."
+ mv D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\.github\quickstarts\windows\ C:/generated_files/files/
+
+ echo "Replacing..."
+ cp C:/generated_files/files/go-cqhttp/scripts/初始化.cmd C:/generated_files/
+
+ Invoke-WebRequest -URI https://github.com/lss233/awesome-chatgpt-qq-presets/archive/refs/heads/master.zip -OutFile C:/tmp_files/presets.zip
+ 7z x C:/tmp_files/presets.zip -oC:/tmp_files/
+ Copy-Item C:\tmp_files\awesome-chatgpt-qq-presets-master\* -Destination C:\generated_files\chatgpt\presets\ -Recurse
+
+          Invoke-WebRequest -URI https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox-0.12.6-1.mxe-cross-win64.7z -OutFile C:/tmp_files/wkhtmltox.7z
+
+ echo "Downloading vc_redist.exe..."
+          Invoke-WebRequest -URI https://aka.ms/vs/17/release/vc_redist.x64.exe -OutFile "C:\generated_files\【语音功能依赖】vc_redist.x64.exe"
+
+ echo "Setting up wkhtmltox"
+ 7z x C:/tmp_files/wkhtmltox.7z -oC:/tmp_files/
+ cp C:/tmp_files/wkhtmltox/bin/wkhtmltoimage.exe C:\generated_files\chatgpt\
+
+ echo "Downloading packages..."
+ cd C:/generated_files/chatgpt
+ ..\python3.11\python.exe C:/generated_files/python3.11/get-pip.py
+ ..\python3.11\python.exe -m pip install -r requirements.txt
+
+ echo "Packing..."
+ cd C:/generated_files
+ 7z a quickstart-windows-dev-go-cqhttp-amd64.zip C:\generated_files\*
+ - name: Archive production artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: quickstart-windows-dev-go-cqhttp-amd64.zip
+ path: |
+ C:\generated_files\quickstart-windows-dev-go-cqhttp-amd64.zip
diff --git a/.github/workflows/quickstart-windows-dev.yaml b/.github/workflows/quickstart-windows-dev-mirai.yaml
similarity index 79%
rename from .github/workflows/quickstart-windows-dev.yaml
rename to .github/workflows/quickstart-windows-dev-mirai.yaml
index 10425d34..bfc1ca0e 100644
--- a/.github/workflows/quickstart-windows-dev.yaml
+++ b/.github/workflows/quickstart-windows-dev-mirai.yaml
@@ -1,4 +1,4 @@
-name: Create Quickstart for Windows 64 on dev
+name: Windows Quickstart Dev (Mirai)
on:
workflow_dispatch:
@@ -26,6 +26,7 @@ jobs:
cd C:/generated_files
mkdir mirai
mkdir chatgpt
+ mkdir ffmpeg
mkdir mirai/plugins
echo "Downloading JRE..."
Invoke-WebRequest -URI https://download.bell-sw.com/java/17.0.6+10/bellsoft-jre17.0.6+10-windows-amd64.zip -OutFile C:/tmp_files/jre.zip
@@ -48,19 +49,24 @@ jobs:
./mcl.cmd --dry-run
cp -r D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\* C:\generated_files\chatgpt\
+
+ echo "Downloading ffmpeg ..."
+ Invoke-WebRequest https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-6.0-full_build.7z -OutFile C:/tmp_files/ffmpeg.7z
+          7z x C:/tmp_files/ffmpeg.7z -r -oC:/generated_files/ffmpeg
+
echo "Downloading Python3.11 ..."
Invoke-WebRequest https://www.python.org/ftp/python/3.11.2/python-3.11.2-embed-amd64.zip -OutFile C:/tmp_files/python.zip
- 7z x C:/tmp_files/python.zip -r -oC:/generated_files/chatgpt/python3.11
+ 7z x C:/tmp_files/python.zip -r -oC:/generated_files/python3.11
echo "Downloading get-pip.py ..."
- Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/chatgpt/python3.11/get-pip.py
- echo "import site" >> C:/generated_files/chatgpt/python3.11/python311._pth
+ Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/python3.11/get-pip.py
+ echo "import site" >> C:/generated_files/python3.11/python311._pth
echo "Moving files..."
mv D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\.github\quickstarts\windows\ C:/generated_files/files/
echo "Replacing..."
- cp C:/generated_files/files/scripts/初始化.cmd C:/generated_files/
+ cp C:/generated_files/files/mirai/scripts/初始化.cmd C:/generated_files/
Invoke-WebRequest -URI https://github.com/lss233/awesome-chatgpt-qq-presets/archive/refs/heads/master.zip -OutFile C:/tmp_files/presets.zip
7z x C:/tmp_files/presets.zip -oC:/tmp_files/
@@ -77,16 +83,16 @@ jobs:
echo "Downloading packages..."
cd C:/generated_files/chatgpt
- python3.11\python.exe C:/generated_files/chatgpt/python3.11/get-pip.py
- python3.11\python.exe -m pip install -r requirements.txt
+ ..\python3.11\python.exe C:/generated_files/python3.11/get-pip.py
+ ..\python3.11\python.exe -m pip install -r requirements.txt
echo "Packing..."
cd C:/generated_files
- 7z a quickstart-windows-amd64.zip C:\generated_files\*
+ 7z a quickstart-windows-mirai-amd64.zip C:\generated_files\*
- name: Archive production artifacts
uses: actions/upload-artifact@v3
with:
- name: quickstart-windows-amd64.zip
+ name: quickstart-windows-mirai-amd64.zip
path: |
- C:\generated_files\quickstart-windows-amd64.zip
+ C:\generated_files\quickstart-windows-mirai-amd64.zip
diff --git a/.github/workflows/quickstart-windows-gocqhttp.yml b/.github/workflows/quickstart-windows-gocqhttp.yml
new file mode 100644
index 00000000..31ce6b03
--- /dev/null
+++ b/.github/workflows/quickstart-windows-gocqhttp.yml
@@ -0,0 +1,84 @@
+name: Windows Quickstart (go-cqhttp)
+
+on:
+ workflow_dispatch:
+ push:
+ tags:
+ - '**'
+
+jobs:
+ build:
+ name: Quickstart (GO-CQHTTP)
+ runs-on: Windows-latest
+
+ steps:
+ - name: Checkout
+ uses: actions/checkout@v3
+ - name: Generate files
+ run: |
+ mkdir C:/generated_files
+ mkdir C:/tmp_files
+ echo "Creating folders..."
+ cd C:/generated_files
+ echo "Downloading go-cqhttp..."
+ mkdir go-cqhttp
+ mkdir chatgpt
+ mkdir ffmpeg
+ Invoke-WebRequest -URI https://github.com/Mrs4s/go-cqhttp/releases/download/v1.0.1/go-cqhttp_windows_amd64.exe -OutFile C:/generated_files/go-cqhttp/go-cqhttp.exe
+
+ cp -r D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\* C:\generated_files\chatgpt\
+
+ echo "Downloading ffmpeg ..."
+ Invoke-WebRequest https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-6.0-full_build.7z -OutFile C:/tmp_files/ffmpeg.7z
+          7z x C:/tmp_files/ffmpeg.7z -r -oC:/generated_files/ffmpeg
+
+ echo "Downloading Python3.11 ..."
+ Invoke-WebRequest https://www.python.org/ftp/python/3.11.2/python-3.11.2-embed-amd64.zip -OutFile C:/tmp_files/python.zip
+ 7z x C:/tmp_files/python.zip -r -oC:/generated_files/python3.11
+
+ echo "Downloading get-pip.py ..."
+ Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/python3.11/get-pip.py
+ echo "import site" >> C:/generated_files/python3.11/python311._pth
+
+ echo "Moving files..."
+ mv D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\.github\quickstarts\windows\ C:/generated_files/files/
+
+ echo "Replacing..."
+ cp C:/generated_files/files/go-cqhttp/scripts/初始化.cmd C:/generated_files/
+
+ Invoke-WebRequest -URI https://github.com/lss233/awesome-chatgpt-qq-presets/archive/refs/heads/master.zip -OutFile C:/tmp_files/presets.zip
+ 7z x C:/tmp_files/presets.zip -oC:/tmp_files/
+ Copy-Item C:\tmp_files\awesome-chatgpt-qq-presets-master\* -Destination C:\generated_files\chatgpt\presets\ -Recurse
+
+          Invoke-WebRequest -URI https://github.com/wkhtmltopdf/packaging/releases/download/0.12.6-1/wkhtmltox-0.12.6-1.mxe-cross-win64.7z -OutFile C:/tmp_files/wkhtmltox.7z
+
+ echo "Downloading vc_redist.exe..."
+          Invoke-WebRequest -URI https://aka.ms/vs/17/release/vc_redist.x64.exe -OutFile "C:\generated_files\【语音功能依赖】vc_redist.x64.exe"
+
+ echo "Setting up wkhtmltox"
+ 7z x C:/tmp_files/wkhtmltox.7z -oC:/tmp_files/
+ cp C:/tmp_files/wkhtmltox/bin/wkhtmltoimage.exe C:\generated_files\chatgpt\
+
+ echo "Downloading packages..."
+ cd C:/generated_files/chatgpt
+ ..\python3.11\python.exe C:/generated_files/python3.11/get-pip.py
+ ..\python3.11\python.exe -m pip install -r requirements.txt
+
+ echo "Packing..."
+ cd C:/generated_files
+ 7z a quickstart-windows-go-cqhttp-amd64.zip C:\generated_files\*
+ - name: Archive production artifacts
+ uses: actions/upload-artifact@v3
+ with:
+ name: quickstart-windows-go-cqhttp-amd64.zip
+ path: |
+ C:\generated_files\quickstart-windows-go-cqhttp-amd64.zip
+ - name: Upload compressed files to release
+ uses: svenstaro/upload-release-action@v2
+ with:
+ repo_token: ${{ secrets.GITHUB_TOKEN }}
+ file: C:\generated_files\quickstart-windows-go-cqhttp-amd64.zip
+ asset_name: Windows-quickstart-go-cqhttp-${{ github.ref }}.zip
+ tag: ${{ github.ref }}
+ overwrite: true
+          body: "quickstart-windows-go-cqhttp-amd64.zip is to quickstart go-cqhttp for Windows x64 user"
diff --git a/.github/workflows/quickstart-windows.yml b/.github/workflows/quickstart-windows-mirai.yml
similarity index 79%
rename from .github/workflows/quickstart-windows.yml
rename to .github/workflows/quickstart-windows-mirai.yml
index f62353d9..c2a977a2 100644
--- a/.github/workflows/quickstart-windows.yml
+++ b/.github/workflows/quickstart-windows-mirai.yml
@@ -1,4 +1,4 @@
-name: Create Quickstart for Windows 64
+name: Windows Quickstart (Mirai)
on:
workflow_dispatch:
@@ -22,6 +22,7 @@ jobs:
cd C:/generated_files
mkdir mirai
mkdir chatgpt
+ mkdir ffmpeg
mkdir mirai/plugins
echo "Downloading JRE..."
Invoke-WebRequest -URI https://download.bell-sw.com/java/17.0.6+10/bellsoft-jre17.0.6+10-windows-amd64.zip -OutFile C:/tmp_files/jre.zip
@@ -44,19 +45,24 @@ jobs:
./mcl.cmd --dry-run
cp -r D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\* C:\generated_files\chatgpt\
+
+ echo "Downloading ffmpeg ..."
+ Invoke-WebRequest https://www.gyan.dev/ffmpeg/builds/packages/ffmpeg-6.0-full_build.7z -OutFile C:/tmp_files/ffmpeg.7z
+          7z x C:/tmp_files/ffmpeg.7z -r -oC:/generated_files/ffmpeg
+
echo "Downloading Python3.11 ..."
Invoke-WebRequest https://www.python.org/ftp/python/3.11.2/python-3.11.2-embed-amd64.zip -OutFile C:/tmp_files/python.zip
- 7z x C:/tmp_files/python.zip -r -oC:/generated_files/chatgpt/python3.11
+ 7z x C:/tmp_files/python.zip -r -oC:/generated_files/python3.11
echo "Downloading get-pip.py ..."
- Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/chatgpt/python3.11/get-pip.py
- echo "import site" >> C:/generated_files/chatgpt/python3.11/python311._pth
+ Invoke-WebRequest -URI https://bootstrap.pypa.io/get-pip.py -OutFile C:/generated_files/python3.11/get-pip.py
+ echo "import site" >> C:/generated_files/python3.11/python311._pth
echo "Moving files..."
mv D:\a\chatgpt-mirai-qq-bot\chatgpt-mirai-qq-bot\.github\quickstarts\windows\ C:/generated_files/files/
echo "Replacing..."
- cp C:/generated_files/files/scripts/初始化.cmd C:/generated_files/
+ cp C:/generated_files/files/mirai/scripts/初始化.cmd C:/generated_files/
Invoke-WebRequest -URI https://github.com/lss233/awesome-chatgpt-qq-presets/archive/refs/heads/master.zip -OutFile C:/tmp_files/presets.zip
7z x C:/tmp_files/presets.zip -oC:/tmp_files/
@@ -73,23 +79,23 @@ jobs:
echo "Downloading packages..."
cd C:/generated_files/chatgpt
- python3.11\python.exe C:/generated_files/chatgpt/python3.11/get-pip.py
- python3.11\python.exe -m pip install -r requirements.txt
+ ..\python3.11\python.exe C:/generated_files/python3.11/get-pip.py
+ ..\python3.11\python.exe -m pip install -r requirements.txt
echo "Packing..."
cd C:/generated_files
- 7z a quickstart-windows-amd64.zip C:\generated_files\*
+ 7z a quickstart-windows-mirai-amd64.zip C:\generated_files\*
- name: Archive production artifacts
uses: actions/upload-artifact@v3
with:
- name: quickstart-windows-amd64.zip
+ name: quickstart-windows-mirai-amd64.zip
path: |
- C:\generated_files\quickstart-windows-amd64.zip
+ C:\generated_files\quickstart-windows-mirai-amd64.zip
- name: Upload compressed files to release
uses: svenstaro/upload-release-action@v2
with:
repo_token: ${{ secrets.GITHUB_TOKEN }}
- file: C:\generated_files\quickstart-windows-amd64.zip
+ file: C:\generated_files\quickstart-windows-mirai-amd64.zip
asset_name: Windows-quickstart-${{ github.ref }}.zip
tag: ${{ github.ref }}
overwrite: true
diff --git a/Dockerfile b/Dockerfile
index 19da2f0a..3c2d4555 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -5,9 +5,8 @@ ENV DEBIAN_FRONTEND=noninteractive
COPY ./fonts/sarasa-mono-sc-regular.ttf /usr/share/fonts/
RUN apt-get update && \
- apt install --no-install-recommends xvfb binutils qtbase5-dev wkhtmltopdf ffmpeg -yq && \
+ apt install --no-install-recommends xvfb binutils build-essential qtbase5-dev wkhtmltopdf ffmpeg -yq && \
(strip --remove-section=.note.ABI-tag /usr/lib/x86_64-linux-gnu/libQt5Core.so.5 || true) && \
- apt-get remove --purge -yq binutils && \
apt-get clean && \
apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
rm -rf /var/lib/apt/lists/*
@@ -18,6 +17,8 @@ WORKDIR /app
COPY requirements.txt /app
RUN pip install --no-cache-dir -r requirements.txt && pip cache purge
+RUN apt-get remove --purge -yq binutils
+
COPY . /app
CMD ["/bin/bash", "/app/docker/start.sh"]
diff --git a/Dockerfile-cn b/Dockerfile-cn
new file mode 100644
index 00000000..5ac60ab0
--- /dev/null
+++ b/Dockerfile-cn
@@ -0,0 +1,26 @@
+FROM python:3.11.2-slim-bullseye
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+COPY ./fonts/sarasa-mono-sc-regular.ttf /usr/share/fonts/
+
+RUN sed -i s@/deb.debian.org/@/mirrors.aliyun.com/@g /etc/apt/sources.list
+RUN cat /etc/apt/sources.list
+
+RUN apt-get update && \
+ apt install --no-install-recommends xvfb binutils qtbase5-dev wkhtmltopdf ffmpeg -yq && \
+ (strip --remove-section=.note.ABI-tag /usr/lib/x86_64-linux-gnu/libQt5Core.so.5 || true) && \
+ apt-get remove --purge -yq binutils && \
+ apt-get clean && \
+ apt-get purge -y --auto-remove -o APT::AutoRemove::RecommendsImportant=false && \
+ rm -rf /var/lib/apt/lists/*
+
+RUN mkdir -p /app
+WORKDIR /app
+
+COPY requirements.txt /app
+RUN pip install -i https://pypi.tuna.tsinghua.edu.cn/simple --trusted-host pypi.tuna.tsinghua.edu.cn --no-cache-dir -r requirements.txt && pip cache purge
+
+COPY . /app
+
+CMD ["/bin/bash", "/app/docker/start.sh"]
diff --git a/README.md b/README.md
index 99ab945f..1741e94c 100644
--- a/README.md
+++ b/README.md
@@ -21,7 +21,13 @@
***
-* [交流群(Discord)](https://discord.gg/cc3S2R6RQV)会发布最新的项目动态、问题答疑和交流 [QQ 群](https://jq.qq.com/?_wv=1027&k=XbGuxdTu) 。
+* [Discord 一群](https://discord.gg/cc3S2R6RQV)、
+ [QQ 二群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=S1R4eIlODtyKZsEKfWxb2-nOIHELbeJY&authKey=kAftCAALE8OJgwQnArrD6zPtncCAaY456QgUXT3l2OMJ57NwRXRkhv4KL7DzOLzs&noverify=0&group_code=373254418)、
+ [QQ 三群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=urlhCH8y7Ro2S-iXt63X4s5eILUny4Iw&authKey=ejiwoNa4Yez6IMLyf2vj%2FeRiC1frdFrNNekbRfaPnSQbcD7bgebo5y5A7rPaRKBq&noverify=0&group_code=533109074)、
+ [QQ 四群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=Ibiu6EmXof30Fa7MJ5j8nJFwaUGTf5bM&authKey=YKx5a%2BK5qnWkk5VlsxxDfYl0nCrKSekQm%2FoLQVqr%2FcO%2FQY2S6N24XdI23XugBrF0&noverify=0&group_code=799737883)、
+ [QQ 五群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=lDkVPDAeiz6M-ig9cdS9tqhSH6_topox&authKey=B%2FRPYVUjk3dYPw5D4o6C2TpqeoKTG0nXEiKDCG%2Bh4JYY2RPqDQGt37SGl32j0hHw&noverify=0&group_code=805081636)、
+ [QQ 开发群](http://qm.qq.com/cgi-bin/qm/qr?_wv=1027&k=lisyXibhUj93DgIZptQu3VZ4ka3F5-rW&authKey=PBCzRQX4Zei%2BB6n5Tdyp9p5bqcF0tLBlfGANT4dSSKQIFYR66WwaZSMEDahWo%2FzZ&noverify=0&group_code=701933732)
+ 会发布最新的项目动态、视频教程、问题答疑和交流。
加群之前先看[这里](https://github.com/lss233/chatgpt-mirai-qq-bot/issues)的内容能不能解决你的问题。
如果不能解决,把遇到的问题、**日志**和配置文件准备好后再提问。
* [调试群](https://jq.qq.com/?_wv=1027&k=TBX8Saq7) 这个群里有很多 ChatGPT QQ 机器人,不解答技术问题。
@@ -40,7 +46,7 @@
* [x] 百度云内容审核
* [x] 额度限制
* [x] 人格设定
-* [x] 支持 Mirai、 go-cqhttp、 Telegram、Discord
+* [x] 支持 Mirai、 go-cqhttp、 Telegram、Discord、微信
* [x] 可作为 HTTP 服务端提供 Web API
* [x] 支持 ChatGPT 网页版
* [x] 支持 ChatGPT Plus
@@ -61,7 +67,8 @@
| OneBot | 支持 | 支持 | 支持 | 支持 | 支持 | 支持 |
| Telegram | 支持 | 支持 | 部分支持 | 部分支持 | 支持 | 支持 |
| Discord | 支持 | 支持 | 部分支持 | 不支持 | 支持 | 支持 |
-
+| 企业微信 | 支持 | 支持 | 支持 | 不支持 | 支持 | 支持 |
+| 个人微信 | 支持 | 支持 | 支持 | 不支持 | 支持 | 支持 |
## 🐎 命令
@@ -72,13 +79,28 @@
如果你是手机党,可以看这个纯用手机的部署教程(使用 Linux 服务器):https://www.bilibili.com/video/av949514538
+
+
+ AidLux: 仅使用旧安卓手机进行部署
+执行下面这行命令启动自动安装脚本。
+
+```bash
+bash -c "$(wget -O- https://gist.githubusercontent.com/B17w153/f77c2726c4eca4e05b488f9af58823a5/raw/4410356eba091d3259c48506fb68112e68db729b/install_bot_aidlux.sh)"
+```
+[部署教程](https://github.com/lss233/chatgpt-for-bot-docs/tree/main/bu-shu-jiao-cheng/kuai-su-bu-shu-jiao-cheng/linux-yi-jian-bu-shu-jiao-cheng.md)
+
+
+
+
+
+
Linux: 通过快速部署脚本部署 (新人推荐)
执行下面这行命令启动自动部署脚本。
它会为你安装 Docker、 Docker Compose 和编写配置文件。
```bash
-bash -c "$(curl -fsSL https://gist.githubusercontent.com/lss233/54f0f794f2157665768b1bdcbed837fd/raw/chatgpt-mirai-installer-154-16RC3.sh)"
+bash -c "$(wget -O- https://gist.githubusercontent.com/lss233/2fdd75be3f0724739368d0dcd9d1367d/raw/62a790da4a391af096074b3355c2c2b7ecab3c28/chatgpt-mirai-installer-gocqhttp.sh)"
```
@@ -102,22 +124,19 @@ bash -c "$(curl -fsSL https://gist.githubusercontent.com/lss233/54f0f794f2157665
# 修改 /path/to/config.cfg 为你 config.cfg 的位置
# XPRA_PASSWORD=123456 中的 123456 是你的 Xpra 密码,建议修改
docker run --name mirai-chatgpt-bot \
- -e XPRA_PASSWORD=123456 \
-v /path/to/config.cfg:/app/config.cfg \
--network host \
lss233/chatgpt-mirai-qq-bot:browser-version
```
-3. 启动后,在浏览器访问 `http://你的服务器IP:14500` 可以访问到登录 ChatGPT 的浏览器页面
-
- Windows: 快速部署包 (自带 Mirai,新人推荐)
+ Windows: 快速部署包 (自带 Mirai/go-cqhttp,新人推荐)
我们为 Windows 用户制作了一个快速启动包,可以在 [Release](https://github.com/lss233/chatgpt-mirai-qq-bot/releases) 中找到。
-文件名为:`quickstart-windows-amd64.zip` 或者 `Windows快速部署包.zip`
+文件名为:`quickstart-windows-go-cqhttp-amd64.zip`(推荐) 或者 `quickstart-windows-mirai-amd64.zip`
diff --git a/adapter/baidu/yiyan.py b/adapter/baidu/yiyan.py
index aa44982e..9ce6157e 100644
--- a/adapter/baidu/yiyan.py
+++ b/adapter/baidu/yiyan.py
@@ -34,8 +34,10 @@ def __init__(self, session_id: str = ""):
super().__init__(session_id)
self.session_id = session_id
self.account = botManager.pick('yiyan-cookie')
+ self.acs_client = httpx.AsyncClient(proxies=self.account.proxy)
self.client = httpx.AsyncClient(proxies=self.account.proxy)
- self.__setup_headers()
+ self.__setup_headers(self.acs_client)
+ self.__setup_headers(self.client)
self.conversation_id = None
self.parent_chat_id = ''
@@ -56,14 +58,20 @@ async def rollback(self):
async def on_reset(self):
await self.client.aclose()
self.client = httpx.AsyncClient(proxies=self.account.proxy)
- self.__setup_headers()
+ self.__setup_headers(self.client)
self.conversation_id = None
self.parent_chat_id = 0
- def __setup_headers(self):
- self.client.headers['Cookie'] = self.account.cookie_content
- self.client.headers['Content-Type'] = 'application/json;charset=UTF-8'
- self.client.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/111.0.0.0 Safari/537.36'
+ def __setup_headers(self, client):
+ client.headers['Cookie'] = f"BDUSS={self.account.BDUSS};"
+ client.headers['Content-Type'] = 'application/json;charset=UTF-8'
+ client.headers['User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+ client.headers['Sec-Fetch-User'] = '?1'
+ client.headers['Sec-Fetch-Mode'] = 'navigate'
+ client.headers['Sec-Fetch-Site'] = 'none'
+ client.headers['Sec-Ch-Ua-Platform'] = '"Windows"'
+ client.headers['Sec-Ch-Ua-Mobile'] = '?0'
+ client.headers['Sec-Ch-Ua'] = '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"'
async def new_conversation(self, prompt: str):
self.client.headers['Acs-Token'] = await self.get_sign()
@@ -73,6 +81,7 @@ async def new_conversation(self, prompt: str):
json={
"sessionName": prompt,
"timestamp": get_ts(),
+ "plugins": [],
"deviceType": "pc"
}
)
@@ -87,6 +96,18 @@ async def ask(self, prompt) -> Generator[str, None, None]:
if not self.conversation_id:
await self.new_conversation(prompt)
+ req = await self.client.post(
+ url="https://yiyan.baidu.com/eb/chat/check",
+ json={
+ "text": prompt,
+ "timestamp": get_ts(),
+ "deviceType": "pc",
+ }
+ )
+
+ req.raise_for_status()
+ self.__check_response(req.json())
+
req = await self.client.post(
url="https://yiyan.baidu.com/eb/chat/new",
json={
@@ -98,7 +119,10 @@ async def ask(self, prompt) -> Generator[str, None, None]:
"deviceType": "pc",
"code": 0,
"msg": "",
- "sign": await self.get_sign()
+ "plugins": [],
+ "pluginInfo": "",
+ "jt": "",
+ "sign": self.client.headers['Acs-Token']
}
)
@@ -123,7 +147,7 @@ async def ask(self, prompt) -> Generator[str, None, None]:
"stop": 0,
"timestamp": get_ts(),
"deviceType": "pc",
- "sign": await self.get_sign()
+ "sign": self.client.headers['Acs-Token']
}
)
req.raise_for_status()
@@ -162,7 +186,9 @@ def __check_response(self, resp):
raise Exception(resp['msg'])
async def get_sign(self):
- req = await self.client.get("https://chatgpt-proxy.lss233.com/yiyan-api/acs")
+ # 目前只需要这一个参数来计算 Acs-Token
+ self.acs_client.headers['Cookie'] = f"BAIDUID={self.account.BAIDUID};"
+ req = await self.acs_client.get("https://chatgpt-proxy.lss233.com/yiyan-api/acs", timeout=30)
return req.json()['acs']
async def __download_image(self, url: str) -> bytes:
diff --git a/adapter/chatgpt/api.py b/adapter/chatgpt/api.py
index e379c5b4..0d780c2a 100644
--- a/adapter/chatgpt/api.py
+++ b/adapter/chatgpt/api.py
@@ -1,54 +1,135 @@
-import ctypes
-import os
-from typing import Generator
-import openai
+import json
+import time
+import aiohttp
+import async_timeout
+import tiktoken
from loguru import logger
-from revChatGPT.V3 import Chatbot as OpenAIChatbot
+from typing import AsyncGenerator
from adapter.botservice import BotAdapter
from config import OpenAIAPIKey
from constants import botManager, config
-hashu = lambda word: ctypes.c_uint64(hash(word)).value
+DEFAULT_ENGINE: str = "gpt-3.5-turbo"
+
+
+class OpenAIChatbot:
+ def __init__(self, api_info: OpenAIAPIKey):
+ self.api_key = api_info.api_key
+ self.proxy = api_info.proxy
+ self.presence_penalty = config.openai.gpt_params.presence_penalty
+ self.frequency_penalty = config.openai.gpt_params.frequency_penalty
+ self.top_p = config.openai.gpt_params.top_p
+ self.temperature = config.openai.gpt_params.temperature
+ self.max_tokens = config.openai.gpt_params.max_tokens
+ self.engine = api_info.model or DEFAULT_ENGINE
+ self.timeout = config.response.max_timeout
+ self.conversation: dict[str, list[dict]] = {
+ "default": [
+ {
+ "role": "system",
+ "content": "You are ChatGPT, a large language model trained by OpenAI.\nKnowledge cutoff: 2021-09\nCurrent date:[current date]",
+ },
+ ],
+ }
+
+ async def rollback(self, session_id: str = "default", n: int = 1) -> None:
+ try:
+ if session_id not in self.conversation:
+ raise ValueError(f"会话 ID {session_id} 不存在。")
+
+ if n > len(self.conversation[session_id]):
+ raise ValueError(f"回滚次数 {n} 超过了会话 {session_id} 的消息数量。")
+
+ for _ in range(n):
+ self.conversation[session_id].pop()
+
+ except ValueError as ve:
+ logger.error(ve)
+ raise
+ except Exception as e:
+ logger.error(f"未知错误: {e}")
+ raise
+
+ def add_to_conversation(self, message: str, role: str, session_id: str = "default") -> None:
+ if role and message is not None:
+ self.conversation[session_id].append({"role": role, "content": message})
+ else:
+ logger.warning("出现错误!返回消息为空,不添加到会话。")
+ raise ValueError("出现错误!返回消息为空,不添加到会话。")
+
+ # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+ def count_tokens(self, session_id: str = "default", model: str = DEFAULT_ENGINE):
+ """Return the number of tokens used by a list of messages."""
+ if model is None:
+ model = DEFAULT_ENGINE
+ try:
+ encoding = tiktoken.encoding_for_model(model)
+ except KeyError:
+ encoding = tiktoken.get_encoding("cl100k_base")
+
+ tokens_per_message = 4
+ tokens_per_name = 1
+
+ num_tokens = 0
+ for message in self.conversation[session_id]:
+ num_tokens += tokens_per_message
+ for key, value in message.items():
+ if value is not None:
+ num_tokens += len(encoding.encode(value))
+ if key == "name":
+ num_tokens += tokens_per_name
+ num_tokens += 3 # every reply is primed with assistant
+ return num_tokens
+
+ def get_max_tokens(self, session_id: str, model: str) -> int:
+ """Get max tokens"""
+ return self.max_tokens - self.count_tokens(session_id, model)
class ChatGPTAPIAdapter(BotAdapter):
api_info: OpenAIAPIKey = None
"""API Key"""
- bot: OpenAIChatbot = None
- """实例"""
-
- hashed_user_id: str
-
def __init__(self, session_id: str = "unknown"):
+ self.latest_role = None
self.__conversation_keep_from = 0
self.session_id = session_id
- self.hashed_user_id = "user-" + hashu("session_id").to_bytes(8, "big").hex()
self.api_info = botManager.pick('openai-api')
- self.bot = OpenAIChatbot(
- api_key=self.api_info.api_key,
- proxy=self.api_info.proxy,
- presence_penalty=config.openai.gpt3_params.presence_penalty,
- frequency_penalty=config.openai.gpt3_params.frequency_penalty,
- top_p=config.openai.gpt3_params.top_p,
- temperature=config.openai.gpt3_params.temperature,
- max_tokens=config.openai.gpt3_params.max_tokens,
- )
+ self.bot = OpenAIChatbot(self.api_info)
self.conversation_id = None
self.parent_id = None
super().__init__()
self.bot.conversation[self.session_id] = []
- self.current_model = "gpt-3.5-turbo"
+ self.current_model = self.bot.engine
self.supported_models = [
"gpt-3.5-turbo",
"gpt-3.5-turbo-0301",
+ "gpt-3.5-turbo-0613",
+ "gpt-3.5-turbo-16k",
+ "gpt-3.5-turbo-16k-0613",
"gpt-4",
"gpt-4-0314",
"gpt-4-32k",
"gpt-4-32k-0314",
+ "gpt-4-0613",
+ "gpt-4-32k-0613",
]
+ def manage_conversation(self, session_id: str, prompt: str):
+ if session_id not in self.bot.conversation:
+ self.bot.conversation[session_id] = [
+ {"role": "system", "content": prompt}
+ ]
+ self.__conversation_keep_from = 1
+
+ while self.bot.max_tokens - self.bot.count_tokens(session_id) < config.openai.gpt_params.min_tokens and \
+ len(self.bot.conversation[session_id]) > self.__conversation_keep_from:
+ self.bot.conversation[session_id].pop(self.__conversation_keep_from)
+ logger.debug(
+ f"清理 token,历史记录遗忘后使用 token 数:{str(self.bot.count_tokens(session_id))}"
+ )
+
async def switch_model(self, model_name):
self.current_model = model_name
self.bot.engine = self.current_model
@@ -56,7 +137,7 @@ async def switch_model(self, model_name):
async def rollback(self):
if len(self.bot.conversation[self.session_id]) <= 0:
return False
- self.bot.rollback(convo_id=self.session_id, n=2)
+ await self.bot.rollback(self.session_id, n=2)
return True
async def on_reset(self):
@@ -64,41 +145,185 @@ async def on_reset(self):
self.bot.api_key = self.api_info.api_key
self.bot.proxy = self.api_info.proxy
self.bot.conversation[self.session_id] = []
+ self.bot.engine = self.current_model
self.__conversation_keep_from = 0
- async def ask(self, prompt: str) -> Generator[str, None, None]:
+ def construct_data(self, messages: list = None, api_key: str = None, stream: bool = True):
+ headers = {
+ 'Content-Type': 'application/json',
+ 'Authorization': f'Bearer {api_key}'
+ }
+ data = {
+ 'model': self.bot.engine,
+ 'messages': messages,
+ 'stream': stream,
+ 'temperature': self.bot.temperature,
+ 'top_p': self.bot.top_p,
+ 'presence_penalty': self.bot.presence_penalty,
+ 'frequency_penalty': self.bot.frequency_penalty,
+ "user": 'user',
+ 'max_tokens': self.bot.get_max_tokens(self.session_id, self.bot.engine),
+ }
+ return headers, data
+
+ def _prepare_request(self, session_id: str = None, messages: list = None, stream: bool = False):
self.api_info = botManager.pick('openai-api')
- self.bot.api_key = self.api_info.api_key
- self.bot.proxy = self.api_info.proxy
- self.bot.session.proxies.update(
- {
- "http": self.bot.proxy,
- "https": self.bot.proxy,
- },
- )
+ api_key = self.api_info.api_key
+ proxy = self.api_info.proxy
+ api_endpoint = config.openai.api_endpoint or "https://api.openai.com/v1"
- if self.session_id not in self.bot.conversation:
- self.bot.conversation[self.session_id] = [
- {"role": "system", "content": self.bot.system_prompt}
- ]
- self.__conversation_keep_from = 1
+ if not messages:
+ messages = self.bot.conversation[session_id]
- while self.bot.max_tokens - self.bot.get_token_count(self.session_id) < config.openai.gpt3_params.min_tokens and \
- len(self.bot.conversation[self.session_id]) > self.__conversation_keep_from:
- self.bot.conversation[self.session_id].pop(self.__conversation_keep_from)
- logger.debug(
- f"清理 token,历史记录遗忘后使用 token 数:{str(self.bot.get_token_count(self.session_id))}"
- )
+ headers, data = self.construct_data(messages, api_key, stream)
- os.environ['API_URL'] = f'{openai.api_base}/chat/completions'
- full_response = ''
- async for resp in self.bot.ask_stream_async(prompt=prompt, role=self.hashed_user_id, convo_id=self.session_id):
- full_response += resp
- yield full_response
- logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{full_response}")
- logger.debug(f"使用 token 数:{str(self.bot.get_token_count(self.session_id))}")
+ return proxy, api_endpoint, headers, data
+
+ async def _process_response(self, resp, session_id: str = None):
+
+ result = await resp.json()
+
+ total_tokens = result.get('usage', {}).get('total_tokens', None)
+ logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{total_tokens}")
+ if total_tokens is None:
+ raise Exception("Response does not contain 'total_tokens'")
+
+ content = result.get('choices', [{}])[0].get('message', {}).get('content', None)
+ logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{content}")
+ if content is None:
+ raise Exception("Response does not contain 'content'")
+
+ response_role = result.get('choices', [{}])[0].get('message', {}).get('role', None)
+ if response_role is None:
+ raise Exception("Response does not contain 'role'")
+
+ self.bot.add_to_conversation(content, response_role, session_id)
+
+ return content
+
+ async def request(self, session_id: str = None, messages: list = None) -> str:
+ proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=False)
+
+ async with aiohttp.ClientSession() as session:
+ with async_timeout.timeout(self.bot.timeout):
+ async with session.post(f'{api_endpoint}/chat/completions', headers=headers,
+ data=json.dumps(data), proxy=proxy) as resp:
+ if resp.status != 200:
+ response_text = await resp.text()
+ raise Exception(
+ f"{resp.status} {resp.reason} {response_text}",
+ )
+ return await self._process_response(resp, session_id)
+
+ async def request_with_stream(self, session_id: str = None, messages: list = None) -> AsyncGenerator[str, None]:
+ proxy, api_endpoint, headers, data = self._prepare_request(session_id, messages, stream=True)
+
+ async with aiohttp.ClientSession() as session:
+ with async_timeout.timeout(self.bot.timeout):
+ async with session.post(f'{api_endpoint}/chat/completions', headers=headers, data=json.dumps(data),
+ proxy=proxy) as resp:
+ if resp.status != 200:
+ response_text = await resp.text()
+ raise Exception(
+ f"{resp.status} {resp.reason} {response_text}",
+ )
+
+ response_role: str = ''
+ completion_text: str = ''
+
+ async for line in resp.content:
+ try:
+ line = line.decode('utf-8').strip()
+ if not line.startswith("data: "):
+ continue
+ line = line[len("data: "):]
+ if line == "[DONE]":
+ break
+ if not line:
+ continue
+ event = json.loads(line)
+ except json.JSONDecodeError:
+ raise Exception(f"JSON解码错误: {line}") from None
+ except Exception as e:
+ logger.error(f"未知错误: {e}\n响应内容: {resp.content}")
+ logger.error("请将该段日志提交到项目issue中,以便修复该问题。")
+ raise Exception(f"未知错误: {e}") from None
+ if 'error' in event:
+ raise Exception(f"响应错误: {event['error']}")
+ if 'choices' in event and len(event['choices']) > 0 and 'delta' in event['choices'][0]:
+ delta = event['choices'][0]['delta']
+ if 'role' in delta:
+ if delta['role'] is not None:
+ response_role = delta['role']
+ if 'content' in delta:
+ event_text = delta['content']
+ if event_text is not None:
+ completion_text += event_text
+ self.latest_role = response_role
+ yield event_text
+ self.bot.add_to_conversation(completion_text, response_role, session_id)
+
+ async def compressed_session(self, session_id: str):
+ if session_id not in self.bot.conversation or not self.bot.conversation[session_id]:
+ logger.debug(f"不存在该会话,不进行压缩: {session_id}")
+ return
+
+ if self.bot.count_tokens(session_id) > config.openai.gpt_params.compressed_tokens:
+ logger.debug('开始进行会话压缩')
+
+ filtered_data = [entry for entry in self.bot.conversation[session_id] if entry['role'] != 'system']
+ self.bot.conversation[session_id] = [entry for entry in self.bot.conversation[session_id] if
+ entry['role'] not in ['assistant', 'user']]
+
+ filtered_data.append(({"role": "system",
+ "content": "Summarize the discussion briefly in 200 words or less to use as a prompt for future context."}))
+
+ async for text in self.request_with_stream(session_id=session_id, messages=filtered_data):
+ pass
+
+ token_count = self.bot.count_tokens(self.session_id, self.bot.engine)
+ logger.debug(f"压缩会话后使用 token 数:{token_count}")
+
+ async def ask(self, prompt: str) -> AsyncGenerator[str, None]:
+ """Send a message to api and return the response with stream."""
+
+ self.manage_conversation(self.session_id, prompt)
+
+ if config.openai.gpt_params.compressed_session:
+ await self.compressed_session(self.session_id)
+
+ event_time = None
+
+ try:
+ if self.bot.engine not in self.supported_models:
+ logger.warning(f"当前模型非官方支持的模型,请注意控制台输出,当前使用的模型为 {self.bot.engine}")
+ logger.debug(f"[尝试使用ChatGPT-API:{self.bot.engine}] 请求:{prompt}")
+ self.bot.add_to_conversation(prompt, "user", session_id=self.session_id)
+ start_time = time.time()
+ logger.debug(self.bot.conversation[self.session_id])
+ full_response = ''
+
+ if config.openai.gpt_params.stream:
+ async for resp in self.request_with_stream(session_id=self.session_id):
+ full_response += resp
+ yield full_response
+
+ token_count = self.bot.count_tokens(self.session_id, self.bot.engine)
+ logger.debug(f"[ChatGPT-API:{self.bot.engine}] 响应:{full_response}")
+ logger.debug(f"[ChatGPT-API:{self.bot.engine}] 使用 token 数:{token_count}")
+ else:
+ yield await self.request(session_id=self.session_id)
+ event_time = time.time() - start_time
+ if event_time is not None:
+ logger.debug(f"[ChatGPT-API:{self.bot.engine}] 接收到全部消息花费了{event_time:.2f}秒")
+
+ except Exception as e:
+ logger.error(f"[ChatGPT-API:{self.bot.engine}] 请求失败:\n{e}")
+ yield f"发生错误: \n{e}"
+ raise
async def preset_ask(self, role: str, text: str):
+ self.bot.engine = self.current_model
if role.endswith('bot') or role in {'assistant', 'chatgpt'}:
logger.debug(f"[预设] 响应:{text}")
yield text
diff --git a/adapter/chatgpt/web.py b/adapter/chatgpt/web.py
index 9251bc66..35a3fb86 100644
--- a/adapter/chatgpt/web.py
+++ b/adapter/chatgpt/web.py
@@ -1,6 +1,7 @@
import datetime
from typing import Generator, Union
+import revChatGPT
from loguru import logger
from adapter.botservice import BotAdapter
@@ -40,6 +41,9 @@ def __init__(self, session_id: str = "unknown"):
if self.bot.account.paid:
self.supported_models.append('text-davinci-002-render-paid')
self.supported_models.append('gpt-4')
+ self.supported_models.append('gpt-4-mobile')
+ self.supported_models.append('gpt-4-browsing')
+ self.supported_models.append('gpt-4-plugins')
async def switch_model(self, model_name):
if (
@@ -55,6 +59,7 @@ async def switch_model(self, model_name):
async def rollback(self):
if len(self.parent_id_prev_queue) <= 0:
return False
+ self.conversation_id = self.conversation_id_prev_queue.pop()
self.parent_id = self.parent_id_prev_queue.pop()
return True
@@ -65,7 +70,7 @@ async def on_reset(self):
and self.conversation_id is not None
):
await self.bot.delete_conversation(self.conversation_id)
- except:
+ except Exception:
logger.warning("删除会话记录失败。")
self.conversation_id = None
self.parent_id = None
@@ -91,29 +96,31 @@ async def ask(self, prompt: str) -> Generator[str, None, None]:
# 确保是当前的会话,才更新 parent_id
if self.conversation_id == resp["conversation_id"]:
self.parent_id = resp["parent_id"]
+
yield resp["message"]
if last_response:
logger.debug(f"[ChatGPT-Web] {last_response['conversation_id']} - {last_response['message']}")
except AttributeError as e:
if str(e).startswith("'str' object has no attribute 'get'"):
yield "出现故障,请发送”{reset}“重新开始!".format(reset=config.trigger.reset_command)
- except V1Error as e:
- if e.code == 2:
+ except revChatGPT.typings.Error as e:
+ if e.code == 429:
current_time = datetime.datetime.now()
self.bot.refresh_accessed_at()
logger.debug(f"[ChatGPT-Web] accessed at: {str(self.bot.accessed_at)}")
first_accessed_at = self.bot.accessed_at[0] if len(self.bot.accessed_at) > 0 \
- else current_time - datetime.timedelta(hours=1)
- remaining = divmod(current_time - first_accessed_at, datetime.timedelta(seconds=60))
+ else current_time
+ next_available_time = first_accessed_at + datetime.timedelta(hours=1)
+ remaining = divmod(next_available_time - current_time, datetime.timedelta(seconds=60))
minute = remaining[0]
second = remaining[1].seconds
- raise BotRatelimitException(f"{minute}分{second}秒")
+ raise BotRatelimitException(f"{minute}分{second}秒") from e
if e.code == 6:
- raise ConcurrentMessageException()
+ raise ConcurrentMessageException() from e
raise e
except Exception as e:
if "Only one message at a time" in str(e):
- raise ConcurrentMessageException()
+ raise ConcurrentMessageException() from e
raise e
def get_queue_info(self):
diff --git a/adapter/claude/slack.py b/adapter/claude/slack.py
new file mode 100644
index 00000000..f0e21a7c
--- /dev/null
+++ b/adapter/claude/slack.py
@@ -0,0 +1,110 @@
+import uuid
+
+import json
+from typing import Generator
+from adapter.botservice import BotAdapter
+from config import SlackAppAccessToken
+from constants import botManager
+from exceptions import BotOperationNotSupportedException
+from loguru import logger
+import httpx
+
+
+class ClaudeInSlackAdapter(BotAdapter):
+ account: SlackAppAccessToken
+ client: httpx.AsyncClient
+
+ def __init__(self, session_id: str = ""):
+ super().__init__(session_id)
+ self.session_id = session_id
+ self.account = botManager.pick('slack-accesstoken')
+ self.client = httpx.AsyncClient(proxies=self.account.proxy)
+ self.__setup_headers(self.client)
+ self.conversation_id = None
+ self.current_model = "claude"
+ self.supported_models = [
+ "claude"
+ ]
+
+ async def switch_model(self, model_name):
+ self.current_model = model_name
+
+ async def rollback(self):
+ raise BotOperationNotSupportedException()
+
+ async def on_reset(self):
+ await self.client.aclose()
+ self.client = httpx.AsyncClient(proxies=self.account.proxy)
+ self.__setup_headers(self.client)
+ self.conversation_id = None
+
+ def __setup_headers(self, client):
+ client.headers['Authorization'] = f"Bearer {self.account.channel_id}@{self.account.access_token}"
+ client.headers['Content-Type'] = 'application/json;charset=UTF-8'
+ client.headers[
+ 'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+ client.headers['Sec-Fetch-User'] = '?1'
+ client.headers['Sec-Fetch-Mode'] = 'navigate'
+ client.headers['Sec-Fetch-Site'] = 'none'
+ client.headers['Sec-Ch-Ua-Platform'] = '"Windows"'
+ client.headers['Sec-Ch-Ua-Mobile'] = '?0'
+ client.headers['Sec-Ch-Ua'] = '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"'
+
+ async def ask(self, prompt) -> Generator[str, None, None]:
+
+ payload = {
+ "action": "next",
+ "messages": [
+ {
+ "id": str(uuid.uuid4()),
+ "role": "user",
+ "author": {
+ "role": "user"
+ },
+ "content": {
+ "content_type": "text",
+ "parts": [
+ prompt
+ ]
+ }
+ }
+ ],
+ "conversation_id": self.conversation_id,
+ "parent_message_id": str(uuid.uuid4()),
+ "model": self.current_model
+ }
+
+ async with self.client.stream(
+ method="POST",
+ url=f"{self.account.app_endpoint}conversation",
+ json=payload,
+ timeout=60
+ ) as response:
+ response.raise_for_status()
+ async for line in response.aiter_lines():
+ if not line or line is None:
+ continue
+ if "data: " in line:
+ line = line[6:]
+ if "[DONE]" in line:
+ break
+
+ try:
+ line = json.loads(line)
+ except json.decoder.JSONDecodeError:
+ continue
+
+ message: str = line["message"]["content"]["parts"][0]
+ self.conversation_id = line["conversation_id"]
+ yield message
+
+ async def preset_ask(self, role: str, text: str):
+ if role.endswith('bot') or role in {'assistant', 'claude'}:
+ logger.debug(f"[预设] 响应:{text}")
+ yield text
+ else:
+ logger.debug(f"[预设] 发送:{text}")
+ item = None
+ async for item in self.ask(text): ...
+ if item:
+ logger.debug(f"[预设] Chatbot 回应:{item}")
diff --git a/adapter/google/bard.py b/adapter/google/bard.py
index bef8f95f..841f32be 100644
--- a/adapter/google/bard.py
+++ b/adapter/google/bard.py
@@ -2,13 +2,14 @@
from typing import Generator
from adapter.botservice import BotAdapter
+import json
+from urllib.parse import quote
+from exceptions import BotOperationNotSupportedException
from config import BardCookiePath
from constants import botManager
-from exceptions import BotOperationNotSupportedException
from loguru import logger
-import json
import httpx
-from urllib.parse import quote
+
hashu = lambda word: ctypes.c_uint64(hash(word)).value
@@ -75,9 +76,9 @@ async def ask(self, prompt: str) -> Generator[str, None, None]:
for lines in res:
if "wrb.fr" in lines:
data = json.loads(json.loads(lines)[0][2])
- result = data[0][0]
+ result = data[4][0][1][0]
self.bard_session_id = data[1][0]
- self.r = data[1][1] # 用于下一次请求, 这个位置是固定的
+ self.r = data[1][1] # 用于下一次请求, 这个位置是固定的
# self.rc = data[4][1][0]
for check in data:
if not check:
diff --git a/adapter/ms/bing.py b/adapter/ms/bing.py
index f7118211..5cd39d40 100644
--- a/adapter/ms/bing.py
+++ b/adapter/ms/bing.py
@@ -1,20 +1,27 @@
import json
+from io import BytesIO
from typing import Generator, Union, List
import aiohttp
+import re
import asyncio
+from PIL import Image
+
from constants import config
from adapter.botservice import BotAdapter
-from EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle
+from EdgeGPT.EdgeGPT import Chatbot as EdgeChatbot, ConversationStyle, NotAllowedToAccess
+from contextlib import suppress
from constants import botManager
from drawing import DrawingAPI
from exceptions import BotOperationNotSupportedException
from loguru import logger
-import re
-from ImageGen import ImageGenAsync
+from EdgeGPT.ImageGen import ImageGenAsync
from graia.ariadne.message.element import Image as GraiaImage
+image_pattern = r"!\[.*\]\((.*)\)"
+
+
class BingAdapter(BotAdapter, DrawingAPI):
cookieData = None
count: int = 0
@@ -36,8 +43,10 @@ def __init__(self, session_id: str = "unknown", conversation_style: Conversation
for line in account.cookie_content.split("; "):
name, value = line.split("=", 1)
self.cookieData.append({"name": name, "value": value})
-
- self.bot = EdgeChatbot(cookies=self.cookieData, proxy=account.proxy)
+ try:
+ self.bot = EdgeChatbot(cookies=self.cookieData, proxy=account.proxy)
+ except NotAllowedToAccess as e:
+ raise Exception("Bing 账号 Cookie 已过期,请联系管理员更新!") from e
async def rollback(self):
raise BotOperationNotSupportedException()
@@ -49,72 +58,114 @@ async def on_reset(self):
async def ask(self, prompt: str) -> Generator[str, None, None]:
self.count = self.count + 1
parsed_content = ''
+ image_urls = []
try:
async for final, response in self.bot.ask_stream(prompt=prompt,
conversation_style=self.conversation_style,
- wss_link=config.bing.wss_link):
- if not final:
- response = re.sub(r"\[\^\d+\^\]", "", response)
- if config.bing.show_references:
- response = re.sub(r"\[(\d+)\]: ", r"\1: ", response)
- else:
- response = re.sub(r"(\[\d+\]\: .+)+", "", response)
- parsed_content = response
-
- else:
- try:
+ wss_link=config.bing.wss_link,
+ locale="zh-cn"):
+ if not response:
+ continue
+
+ if final:
+ # 最后一条消息
+ max_messages = config.bing.max_messages
+ with suppress(KeyError):
max_messages = response["item"]["throttling"]["maxNumUserMessagesInConversation"]
- except Exception:
- max_messages = config.bing.max_messages
- if config.bing.show_remaining_count:
- remaining_conversations = f'\n剩余回复数:{self.count} / {max_messages} '
- else:
- remaining_conversations = ''
+
+ with suppress(KeyError):
+ raw_text = response["item"]["messages"][1]["adaptiveCards"][0]["body"][0]["text"]
+ image_urls = re.findall(image_pattern, raw_text)
+
+ remaining_conversations = f'\n剩余回复数:{self.count} / {max_messages} ' \
+ if config.bing.show_remaining_count else ''
+
if len(response["item"].get('messages', [])) > 1 and config.bing.show_suggestions:
suggestions = response["item"]["messages"][-1].get("suggestedResponses", [])
if len(suggestions) > 0:
parsed_content = parsed_content + '\n猜你想问: \n'
for suggestion in suggestions:
parsed_content = f"{parsed_content}* {suggestion.get('text')} \n"
- yield parsed_content
+
parsed_content = parsed_content + remaining_conversations
- # not final的parsed_content已经yield走了,只能在末尾加剩余回复数,或者改用EdgeGPT自己封装的ask之后再正则替换
+
if parsed_content == remaining_conversations: # No content
yield "Bing 已结束本次会话。继续发送消息将重新开启一个新会话。"
await self.on_reset()
return
+ else:
+ # 生成中的消息
+ parsed_content = re.sub(r"Searching the web for:(.*)\n", "", response)
+ parsed_content = re.sub(r"```json(.*)```", "", parsed_content,flags=re.DOTALL)
+ parsed_content = re.sub(r"Generating answers for you...", "", parsed_content)
+ if config.bing.show_references:
+ parsed_content = re.sub(r"\[(\d+)\]: ", r"\1: ", parsed_content)
+ else:
+ parsed_content = re.sub(r"(\[\d+\]\: .+)+", "", parsed_content)
+ parts = re.split(image_pattern, parsed_content)
+ # 图片单独保存
+ parsed_content = parts[0]
+
+ if len(parts) > 2:
+ parsed_content = parsed_content + parts[-1]
yield parsed_content
logger.debug(f"[Bing AI 响应] {parsed_content}")
+ image_tasks = [
+ asyncio.create_task(self.__download_image(url))
+ for url in image_urls
+ ]
+ for image in await asyncio.gather(*image_tasks):
+ yield image
except (asyncio.exceptions.TimeoutError, asyncio.exceptions.CancelledError) as e:
raise e
- except Exception as e:
- logger.exception(e)
- yield "Bing 已结束本次会话。继续发送消息将重新开启一个新会话。"
- await self.on_reset()
+ except NotAllowedToAccess:
+ yield "出现错误:机器人的 Bing Cookie 可能已过期,或者机器人当前使用的 IP 无法使用 Bing AI。"
return
+ except Exception as e:
+ if str(e) == 'Redirect failed':
+ yield '画图失败:Redirect failed'
+ return
+ raise e
async def text_to_img(self, prompt: str):
logger.debug(f"[Bing Image] Prompt: {prompt}")
+ try:
+ async with ImageGenAsync(
+ all_cookies=self.bot.chat_hub.cookies,
+ quiet=True
+ ) as image_generator:
+ images = await image_generator.get_images(prompt)
+
+ logger.debug(f"[Bing Image] Response: {images}")
+ tasks = [asyncio.create_task(self.__download_image(image)) for image in images]
+ return await asyncio.gather(*tasks)
+ except Exception as e:
+ if str(e) == 'Redirect failed':
+ raise Exception('画图失败:Redirect failed') from e
+ raise e
- async with ImageGenAsync(
- next((cookie['value'] for cookie in self.bot.cookies if cookie['name'] == '_U'), None),
- False
- ) as image_generator:
- images = await image_generator.get_images(prompt)
-
- logger.debug(f"[Bing Image] Response: {images}")
- tasks = [asyncio.create_task(self.__download_image(image)) for image in images]
- return await asyncio.gather(*tasks)
async def img_to_img(self, init_images: List[GraiaImage], prompt=''):
return await self.text_to_img(prompt)
- async def __download_image(self, url):
+ async def __download_image(self, url) -> GraiaImage:
+ logger.debug(f"[Bing AI] 下载图片:{url}")
+
async with aiohttp.ClientSession() as session:
async with session.get(url, proxy=self.bot.proxy) as resp:
- if resp.status == 200:
- return GraiaImage(data_bytes=await resp.read())
+ resp.raise_for_status()
+ logger.debug(f"[Bing AI] 下载完成:{resp.content_type} {url}")
+ return GraiaImage(data_bytes=await resp.read())
async def preset_ask(self, role: str, text: str):
- yield None # Bing 不使用预设功能
+ if role.endswith('bot') or role in {'assistant', 'bing'}:
+ logger.debug(f"[预设] 响应:{text}")
+ yield text
+ else:
+ logger.debug(f"[预设] 发送:{text}")
+ item = None
+ async for item in self.ask(text): ...
+ if item:
+ logger.debug(f"[预设] Chatbot 回应:{item}")
+
diff --git a/adapter/quora/poe.py b/adapter/quora/poe.py
index da7f2b60..1f144ec0 100644
--- a/adapter/quora/poe.py
+++ b/adapter/quora/poe.py
@@ -11,14 +11,19 @@
class PoeBot(Enum):
- """Poe 支持的机器人:{'capybara': 'Sage', 'beaver': 'GPT-4', 'a2_2': 'Claude+','a2': 'Claude', 'chinchilla': 'ChatGPT',
- 'nutria': 'Dragonfly'} """
+ """Poe 支持的机器人:{'capybara': 'Assistant', 'a2': 'Claude-instant', 'beaver': 'GPT-4', 'chinchilla': 'ChatGPT',
+ 'llama_2_7b_chat': 'Llama-2-7b', 'a2_100k': 'Claude-instant-100k', 'llama_2_13b_chat': 'Llama-2-13b', 'agouti': 'ChatGPT-16k',
+ 'vizcacha': 'GPT-4-32k', 'acouchy': 'Google-PaLM', 'llama_2_70b_chat':'Llama-2-70b', 'a2_2': 'Claude-2-100k'} """
Sage = "capybara"
GPT4 = "beaver"
+ GPT432k = "vizcacha"
Claude2 = "a2_2"
Claude = "a2"
+ Claude100k = "a2_100k"
ChatGPT = "chinchilla"
- Dragonfly = "nutria"
+ ChatGPT16k = "agouti"
+ Llama2 = "llama_2_70b_chat"
+ PaLM = "acouchy"
@staticmethod
def parse(bot_name: str):
@@ -30,6 +35,7 @@ def parse(bot_name: str):
if str(bot.name).lower() == tmp_name
or str(bot.value).lower() == tmp_name
or f"poe-{str(bot.name).lower()}" == tmp_name
+ or f"poe-{str(bot.value).lower()}" == tmp_name
),
None,
)
@@ -69,14 +75,13 @@ async def ask(self, msg: str) -> Generator[str, None, None]:
self.poe_client.last_ask_time = time.time()
except Exception as e:
logger.warning(f"Poe connection error {str(e)}")
- if self.process_retry <= 3:
- new_poe_client = botManager.reset_bot(self.poe_client)
- self.poe_client = new_poe_client
- self.process_retry += 1
- async for resp in self.ask(msg):
- yield resp
- else:
+ if self.process_retry > 3:
raise e
+ new_poe_client = botManager.reset_bot(self.poe_client)
+ self.poe_client = new_poe_client
+ self.process_retry += 1
+ async for resp in self.ask(msg):
+ yield resp
def check_and_reset_client(self):
current_time = time.time()
@@ -92,13 +97,12 @@ async def rollback(self):
self.process_retry = 0
except Exception as e:
logger.warning(f"Poe connection error {str(e)}")
- if self.process_retry <= 3:
- new_poe_client = botManager.reset_bot(self.poe_client)
- self.poe_client = new_poe_client
- self.process_retry += 1
- await self.rollback()
- else:
+ if self.process_retry > 3:
raise e
+ new_poe_client = botManager.reset_bot(self.poe_client)
+ self.poe_client = new_poe_client
+ self.process_retry += 1
+ await self.rollback()
async def on_reset(self):
"""当会话被重置时,此函数被调用"""
@@ -107,10 +111,9 @@ async def on_reset(self):
self.process_retry = 0
except Exception as e:
logger.warning(f"Poe connection error {str(e)}")
- if self.process_retry <= 3:
- new_poe_client = botManager.reset_bot(self.poe_client)
- self.poe_client = new_poe_client
- self.process_retry += 1
- await self.on_reset()
- else:
+ if self.process_retry > 3:
raise e
+ new_poe_client = botManager.reset_bot(self.poe_client)
+ self.poe_client = new_poe_client
+ self.process_retry += 1
+ await self.on_reset()
diff --git a/adapter/xunfei/xinghuo.py b/adapter/xunfei/xinghuo.py
new file mode 100644
index 00000000..3bdf6e4a
--- /dev/null
+++ b/adapter/xunfei/xinghuo.py
@@ -0,0 +1,122 @@
+from io import BytesIO
+
+from typing import Generator
+
+from adapter.botservice import BotAdapter
+from config import XinghuoCookiePath
+from constants import botManager
+from exceptions import BotOperationNotSupportedException
+from loguru import logger
+import httpx
+import base64
+from PIL import Image
+
+
+class XinghuoAdapter(BotAdapter):
+ """
+ Credit: https://github.com/dfvips/xunfeixinghuo
+ """
+ account: XinghuoCookiePath
+ client: httpx.AsyncClient
+
+ def __init__(self, session_id: str = ""):
+ super().__init__(session_id)
+ self.session_id = session_id
+ self.account = botManager.pick('xinghuo-cookie')
+ self.client = httpx.AsyncClient(proxies=self.account.proxy)
+ self.__setup_headers(self.client)
+ self.conversation_id = None
+ self.parent_chat_id = ''
+
+ async def delete_conversation(self, session_id):
+ return await self.client.post("https://xinghuo.xfyun.cn/iflygpt/u/chat-list/v1/del-chat-list", json={
+ 'chatListId': session_id
+ })
+
+ async def rollback(self):
+ raise BotOperationNotSupportedException()
+
+ async def on_reset(self):
+ await self.client.aclose()
+ self.client = httpx.AsyncClient(proxies=self.account.proxy)
+ self.__setup_headers(self.client)
+ self.conversation_id = None
+ self.parent_chat_id = 0
+
+ def __setup_headers(self, client):
+ client.headers['Cookie'] = f"ssoSessionId={self.account.ssoSessionId};"
+ client.headers[
+ 'User-Agent'] = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.0.0 Safari/537.36'
+ client.headers['Sec-Fetch-User'] = '?1'
+ client.headers['Sec-Fetch-Mode'] = 'navigate'
+ client.headers['Sec-Fetch-Site'] = 'none'
+ client.headers['Sec-Ch-Ua-Platform'] = '"Windows"'
+ client.headers['Sec-Ch-Ua-Mobile'] = '?0'
+ client.headers['Sec-Ch-Ua'] = '"Chromium";v="112", "Google Chrome";v="112", "Not:A-Brand";v="99"'
+ client.headers['Origin'] = 'https://xinghuo.xfyun.cn'
+ client.headers['Referer'] = 'https://xinghuo.xfyun.cn/desk'
+ client.headers['Connection'] = 'keep-alive'
+ client.headers['X-Requested-With'] = 'XMLHttpRequest'
+
+ async def new_conversation(self):
+ req = await self.client.post(
+ url="https://xinghuo.xfyun.cn/iflygpt/u/chat-list/v1/create-chat-list",
+ json={}
+ )
+ req.raise_for_status()
+ self.__check_response(req.json())
+ self.conversation_id = req.json()['data']['id']
+ self.parent_chat_id = 0
+
+ async def ask(self, prompt) -> Generator[str, None, None]:
+ if not self.conversation_id:
+ await self.new_conversation()
+
+ full_response = ''
+ async with self.client.stream(
+ "POST",
+ url="https://xinghuo.xfyun.cn/iflygpt-chat/u/chat_message/chat",
+ data={
+ 'fd': self.account.fd,
+ 'chatId': self.conversation_id,
+ 'text': prompt,
+ 'GtToken': self.account.GtToken,
+ 'sid': self.account.sid,
+ 'clientType': '1',
+ 'isBot':'0'
+ },
+ ) as req:
+ async for line in req.aiter_lines():
+ if not line:
+ continue
+ if line == 'data:':
+ break
+ if line == 'data:[geeError]':
+ yield "错误:出现验证码,请到星火网页端发送一次消息再试。"
+ break
+ encoded_data = line[len("data:"):]
+ missing_padding = len(encoded_data) % 4
+ if missing_padding != 0:
+ encoded_data += '=' * (4 - missing_padding)
+ decoded_data = base64.b64decode(encoded_data).decode('utf-8')
+ if encoded_data != 'zw':
+ decoded_data = decoded_data.replace('\n\n', '\n')
+ full_response += decoded_data
+ yield full_response
+
+ logger.debug(f"[Xinghuo] {self.conversation_id} - {full_response}")
+
+ async def preset_ask(self, role: str, text: str):
+ if role.endswith('bot') or role in {'assistant', 'xinghuo'}:
+ logger.debug(f"[预设] 响应:{text}")
+ yield text
+ else:
+ logger.debug(f"[预设] 发送:{text}")
+ item = None
+ async for item in self.ask(text): ...
+ if item:
+ logger.debug(f"[预设] Chatbot 回应:{item}")
+
+ def __check_response(self, resp):
+ if int(resp['code']) != 0:
+ raise Exception(resp['msg'])
\ No newline at end of file
diff --git a/bot.py b/bot.py
index 63d37ced..e047c4e7 100644
--- a/bot.py
+++ b/bot.py
@@ -1,15 +1,13 @@
import os
import sys
-
-sys.path.append(os.getcwd())
import creart
+sys.path.append(os.getcwd())
from asyncio import AbstractEventLoop
import asyncio
from utils.exithooks import hook
from loguru import logger
from constants import config, botManager
-
-hook()
+from utils.edge_tts import load_edge_tts_voices
loop = creart.create(AbstractEventLoop)
@@ -17,27 +15,45 @@
bots = []
+if config.mirai:
+ logger.info("检测到 mirai 配置,将启动 mirai 模式……")
+ from platforms.ariadne_bot import start_task
+
+ bots.append(loop.create_task(start_task()))
+
if config.onebot:
logger.info("检测到 Onebot 配置,将启动 Onebot 模式……")
from platforms.onebot_bot import start_task
+
bots.append(loop.create_task(start_task()))
if config.telegram:
logger.info("检测到 telegram 配置,将启动 telegram bot 模式……")
from platforms.telegram_bot import start_task
+
bots.append(loop.create_task(start_task()))
if config.discord:
logger.info("检测到 discord 配置,将启动 discord bot 模式……")
from platforms.discord_bot import start_task
+
bots.append(loop.create_task(start_task()))
if config.http:
logger.info("检测到 http 配置,将启动 http service 模式……")
from platforms.http_service import start_task
+
bots.append(loop.create_task(start_task()))
-if config.mirai:
- logger.info("检测到 mirai 配置,将启动 mirai 模式……")
- from platforms.ariadne_bot import start_task
+if config.wecom:
+ logger.info("检测到 Wecom 配置,将启动 Wecom Bot 模式……")
+ from platforms.wecom_bot import start_task
+
bots.append(loop.create_task(start_task()))
+try:
+ logger.info("[Edge TTS] 读取 Edge TTS 可用音色列表……")
+ loop.run_until_complete(load_edge_tts_voices())
+ logger.info("[Edge TTS] 读取成功!")
+except Exception as e:
+ logger.exception(e)
+ logger.error("[Edge TTS] 读取失败!")
+hook()
loop.run_until_complete(asyncio.gather(*bots))
loop.run_forever()
-
diff --git a/chatbot/chatgpt.py b/chatbot/chatgpt.py
index 6ee98e8f..85572688 100644
--- a/chatbot/chatgpt.py
+++ b/chatbot/chatgpt.py
@@ -44,6 +44,8 @@ def refresh_accessed_at(self):
current_time = datetime.datetime.now()
while len(self.accessed_at) > 0 and current_time - self.accessed_at[0] > datetime.timedelta(hours=1):
self.accessed_at.pop(0)
+ if len(self.accessed_at) == 0:
+ self.accessed_at.append(current_time)
async def delete_conversation(self, conversation_id):
await self.bot.delete_conversation(conversation_id)
@@ -53,7 +55,8 @@ async def ask(self, prompt, conversation_id=None, parent_id=None, model=''):
# self.queue 已交给 MiddlewareConcurrentLock 处理,此处不处理
self.bot.conversation_id = conversation_id
self.bot.parent_id = parent_id
- async for r in self.bot.ask(prompt=prompt, conversation_id=conversation_id, parent_id=parent_id, model=model):
+ self.bot.config['model'] = model
+ async for r in self.bot.ask(prompt=prompt, conversation_id=conversation_id, parent_id=parent_id):
yield r
self.update_accessed_at()
diff --git a/command.py b/command.py
new file mode 100644
index 00000000..53963c36
--- /dev/null
+++ b/command.py
@@ -0,0 +1,192 @@
+import re
+from constants import config, botManager
+from loguru import logger
+
+from utils.text_to_speech import TtsVoiceManager
+
+
+class CommandContext:
+ def __init__(self, args, session_id, conversation_context, conversation_handler, respond, is_manager, task):
+ self.args = args
+ """命令的参数"""
+ self.session_id = session_id
+ """当前会话的ID"""
+ self.conversation_context = conversation_context
+ """当前会话的上下文"""
+ self.conversation_handler = conversation_handler
+ """当前会话的处理器"""
+ self.respond = respond
+ """回复消息的方法"""
+ self.is_manager = is_manager
+ """是否为管理员"""
+ self.task = task
+ """当前会话的任务"""
+
+
+class CommandHandler:
+ """
+ 命令处理器
+    各命令处理函数:无需后续任务处理时返回 True,
+    需要交由后续流程继续处理任务时返回 False
+ """
+ def __init__(self):
+ self.commands = {
+ r"切换模型 (.+)": self.handle_switch_model,
+ r"切换AI (.+)": self.handle_switch_ai,
+ r"切换语音 (.+)": self.handle_switch_voice,
+ r"重置会话": self.handle_reset_conversation,
+ r"回滚会话": self.handle_rollback_command,
+ r"图文混合模式": self.handle_mixed_only_command,
+ r"图片模式": self.handle_image_only_command,
+ r"文本模式": self.handle_text_only_command,
+ r"帮助|help": self.handle_help,
+ r"ping": self.handle_ping_command,
+ r"加载预设 (.+)": self.handle_load_preset_command,
+ }
+ self.command_descriptions = {
+ r"切换模型 (.+)": "切换当前上下文的模型,例如:切换模型 gpt-4",
+ r"切换AI (.+)": "切换AI的命令,例如:切换AI chatgpt-web",
+ r"切换语音 (.+)": "切换tts语音音色的命令,例如:切换语音 zh-CN-XiaoxiaoNeural",
+ r"重置会话": "重置当前上下文的会话",
+ r"回滚会话": "回滚当前上下文的会话",
+ r"图文混合模式": "切换当前上下文的渲染模式为图文混合模式",
+ r"图片模式": "切换当前上下文的渲染模式为图片模式",
+ r"文本模式": "切换当前上下文的渲染模式为文本模式",
+ r"帮助|help": "显示所有指令",
+ }
+
+ async def get_ping_response(self, conversation_context):
+ current_voice = conversation_context.conversation_voice.alias if conversation_context.conversation_voice else "无"
+ response = config.response.ping_response.format(current_ai=conversation_context.type,
+ current_voice=current_voice,
+ supported_ai=botManager.bots_info())
+ tts_voices = await TtsVoiceManager.list_tts_voices(
+ config.text_to_speech.engine, config.text_to_speech.default_voice_prefix)
+ if tts_voices:
+ supported_tts = ",".join([v.alias for v in tts_voices])
+ response += config.response.ping_tts_response.format(supported_tts=supported_tts)
+ return response
+
+ async def handle_load_preset_command(self, context):
+ preset_name = context.args[0]
+ logger.trace(f"{context.session_id} - 正在执行预设: {preset_name}")
+ async for _ in context.conversation_context.reset(): ...
+ context.task[0] = context.conversation_context.load_preset(preset_name)
+ if not context.conversation_context.preset:
+ # 当前没有预设
+ logger.trace(f"{context.session_id} - 未检测到预设,正在执行默认预设……")
+ # 隐式加载不回复预设内容
+ async for _ in context.conversation_context.load_preset('default'): ...
+ return False
+
+ async def handle_ping_command(self, context):
+ await context.respond(await self.get_ping_response(context.conversation_context))
+ return True
+
+ async def handle_help(self, context):
+ help_text = ""
+ for command, handler in self.commands.items():
+ if command == "帮助|help":
+ continue
+ description = self.command_descriptions.get(command, "")
+ help_text += f"{command}: {description}\n"
+ await context.respond(help_text)
+ return True
+
+ async def handle_mixed_only_command(self, context):
+ context.conversation_context.switch_renderer("mixed")
+ await context.respond("已切换至图文混合模式,接下来我的回复将会以图文混合的方式呈现!")
+ return True
+
+ async def handle_image_only_command(self, context):
+ context.conversation_context.switch_renderer("image")
+ await context.respond("已切换至纯图片模式,接下来我的回复将会以图片呈现!")
+ return True
+
+ async def handle_text_only_command(self, context):
+ context.conversation_context.switch_renderer("text")
+ await context.respond("已切换至纯文字模式,接下来我的回复将会以文字呈现(被吞除外)!")
+ return True
+
+ async def handle_switch_model(self, context):
+ model_name = context.args[0]
+ if model_name in context.conversation_context.supported_models:
+ if not (context.is_manager or model_name in config.trigger.allowed_models):
+ await context.respond(f"不好意思,只有管理员才能切换到 {model_name} 模型!")
+ else:
+ await context.conversation_context.switch_model(model_name)
+ await context.respond(f"已切换至 {model_name} 模型,让我们聊天吧!")
+ else:
+ logger.warning(f"模型 {model_name} 不在支持列表中,下次将尝试使用此模型创建对话。")
+ await context.conversation_context.switch_model(model_name)
+ await context.respond(
+ f"模型 {model_name} 不在支持列表中,下次将尝试使用此模型创建对话,目前AI仅支持:{context.conversation_context.supported_models}!")
+ return True
+
+ async def handle_switch_ai(self, context):
+ bot_type_search = context.args[0]
+ if not (config.trigger.allow_switching_ai or context.is_manager):
+ await context.respond("不好意思,只有管理员才能切换AI!")
+ return False
+ context.conversation_handler.current_conversation = (
+ await context.conversation_handler.create(
+ bot_type_search
+ )
+ )
+ await context.respond(f"已切换至 {bot_type_search} AI,现在开始和我聊天吧!")
+ return True
+
+ async def handle_switch_voice(self, context):
+ voice_name = context.args[0]
+ if not config.azure.tts_speech_key and config.text_to_speech.engine == "azure":
+ await context.respond("未配置 Azure TTS 账户,无法切换语音!")
+ new_voice = voice_name
+ if new_voice in ['关闭', "None"]:
+ context.conversation_context.conversation_voice = None
+ await context.respond("已关闭语音,让我们继续聊天吧!")
+ elif config.text_to_speech.engine == "vits":
+ from utils.vits_tts import vits_api_instance
+ try:
+ voice_name = await vits_api_instance.set_id(new_voice)
+ context.conversation_context.conversation_voice = TtsVoiceManager.parse_tts_voice("vits", voice_name)
+ await context.respond(f"已切换至 {voice_name} 语音,让我们继续聊天吧!")
+ except ValueError:
+ await context.respond("提供的语音ID无效,请输入一个有效的数字ID。")
+ except Exception as e:
+ await context.respond(str(e))
+ elif config.text_to_speech.engine == "edge":
+ if tts_voice := TtsVoiceManager.parse_tts_voice("edge", new_voice):
+ context.conversation_context.conversation_voice = tts_voice
+ await context.respond(f"已切换至 {tts_voice.alias} 语音,让我们继续聊天吧!")
+ else:
+ available_voice = ",".join([v.alias for v in await TtsVoiceManager.list_tts_voices(
+ "edge", config.text_to_speech.default_voice_prefix)])
+ await context.respond(f"提供的语音ID无效,请输入一个有效的语音ID。如:{available_voice}。")
+ context.conversation_context.conversation_voice = None
+ elif config.text_to_speech.engine == "azure":
+ tts_voice = TtsVoiceManager.parse_tts_voice("azure", new_voice)
+ context.conversation_context.conversation_voice = tts_voice
+ if tts_voice:
+ await context.respond(f"已切换至 {tts_voice.full_name} 语音,让我们继续聊天吧!")
+ else:
+ await context.respond("提供的语音ID无效,请输入一个有效的语音ID。")
+ else:
+ await context.respond("未配置文字转语音引擎,无法使用语音功能。")
+ return True
+
+ async def handle_reset_conversation(self, context):
+ context.task[0] = context.conversation_context.reset()
+ return False
+
+ async def handle_rollback_command(self, context):
+ context.task[0] = context.conversation_context.rollback()
+ return False
+
+ async def handle_command(self, prompt, session_id, conversation_context, conversation_handler, respond, is_manager, task):
+ for command_regex, handler in self.commands.items():
+ if match := re.search(command_regex, prompt):
+ args = match.groups()
+ context = CommandContext(args, session_id, conversation_context, conversation_handler, respond,
+ is_manager, task)
+ return await handler(context)
+ return False
diff --git a/config.py b/config.py
index 1e4aa745..fcd461b7 100644
--- a/config.py
+++ b/config.py
@@ -9,13 +9,11 @@
class Onebot(BaseModel):
- qq: int
- """Bot 的 QQ 号"""
manager_qq: int = 0
"""机器人管理员的 QQ 号"""
reverse_ws_host: str = "0.0.0.0"
"""go-cqhttp 的 反向 ws 主机号"""
- reverse_ws_port: Optional[int] = None
+ reverse_ws_port: Optional[int] = 8566
"""go-cqhttp 的 反向 ws 端口号,填写后开启 反向 ws 模式"""
@@ -59,13 +57,35 @@ class HttpService(BaseModel):
"""是否开启debug,错误时展示日志"""
-class OpenAIGPT3Params(BaseModel):
+class WecomBot(BaseModel):
+ host: str = "0.0.0.0"
+ """企业微信回调地址,需要能够被公网访问,0.0.0.0则不限制访问地址"""
+ port: int = 5001
+ """Http service port, 默认5001"""
+ debug: bool = False
+ """是否开启debug,错误时展示日志"""
+    corp_id: str
+    """企业微信的企业 ID"""
+    agent_id: str
+    """企业微信应用的 AgentId"""
+    secret: str
+    """企业微信应用的 Secret"""
+    token: str
+    """企业微信应用 API 令牌的 Token"""
+    encoding_aes_key: str
+    """企业微信应用 API 令牌的 EncodingAESKey"""
+
+
+class OpenAIParams(BaseModel):
temperature: float = 0.5
max_tokens: int = 4000
top_p: float = 1.0
presence_penalty: float = 0.0
frequency_penalty: float = 0.0
min_tokens: int = 1000
+ compressed_session: bool = False
+ compressed_tokens: int = 1000
+ stream: bool = True
class OpenAIAuths(BaseModel):
@@ -74,7 +94,7 @@ class OpenAIAuths(BaseModel):
api_endpoint: Optional[str] = None
"""自定义 OpenAI API 的接入点"""
- gpt3_params: OpenAIGPT3Params = OpenAIGPT3Params()
+ gpt_params: OpenAIParams = OpenAIParams()
accounts: List[Union[OpenAIEmailAuth, OpenAISessionTokenAuth, OpenAIAccessTokenAuth, OpenAIAPIKey]] = []
@@ -183,7 +203,7 @@ class BingAuths(BaseModel):
"""Bing 的会话创建接入点"""
accounts: List[BingCookiePath] = []
"""Bing 的账号列表"""
- max_messages: int = 20
+ max_messages: int = 30
"""Bing 的最大消息数,仅展示用"""
@@ -193,8 +213,26 @@ class BardAuths(BaseModel):
class YiyanCookiePath(BaseModel):
- cookie_content: str
- """"文心一言网站的 Cookie 内容"""
+ BDUSS: Optional[str] = None
+ """百度 Cookie 中的 BDUSS 字段"""
+ BAIDUID: Optional[str] = None
+ """百度 Cookie 中的 BAIDUID 字段"""
+ cookie_content: Optional[str] = None
+ """百度 Cookie (已弃用)"""
+ proxy: Optional[str] = None
+ """可选的代理地址,留空则检测系统代理"""
+
+
+class XinghuoCookiePath(BaseModel):
+ ssoSessionId: str
+ """星火 Cookie 中的 ssoSessionId 字段"""
+ fd: Optional[str] = ""
+ """星火请求中的 fd 字段"""
+ GtToken: Optional[
+ str] = "R0VFAAYyNDAzOTU0YzM5Y2M0ZTRlNDY2MTE2MDA4ZGZlYjZjMGQzNGMyMGY0YjQ1NTA1NDg3OWQ0ZWJlOTk0NzQxNGI1MWUzM2IzZDUyZTEyMGM3MWYxNjlmNWY2YmYwMWMxNDI2YzIxOTlmZjMzYTI5YmY3YjQ1M2RjZGQwZWNjMDdiYjMzMmY4OTE2OTRhYTk1OWIyZWVlNzFjNmI5ZWFmY2MxNDFkNjk2MWYzYWQ3ZDAyYjZkM2U0YTllYWZlOTM0Njc4NmMyZmQ4NTRiYWViMTI2NjhlZmFhMWRiNmRmMDc5MzQxN2EyYzMzZDhiN2M4NzJjMzQ3YTYwNDFiMGZkZjkxN2Q2OTRlOWFiZWMwN2U0ZTg3Y2UwM2UxNDlmODBjMzA0MmE4NTAyNzhiNjU0MTU3ZjBlMmMzN2UxMTQ0MjA3ZWE0MDIzZTMyNDRiMjJmMjcwYjE5NGZiMWJhMmFlNGQ4YzkxMWNmZmQ0OGQzYzBlYmQxMTk1ZjE5MDJmMTVjNWUyMDI3ZmNmMDI0ODIxYWJiMWZhNzc3MTExOTBiZmZhMWRhYmRlYzVhYTkwMGRlMjU2YjFhNGQ4ZGYwYzQ0ZjI4MGJiNzcyNGIyOTlkYjU0ZGMyYjllY2U1NjNlYjQzZWE5MzhkMmQ3NTFjMTVkMGY0NDNkYjdhNzdlMmQ4NzM1NTQ3NDI0ZDBjNzRmMTA0NzY4NmI2M2UwZWRiMDM0ZjNhODc1NGZkYjgxMDBlNDA0MmZlZDYzZmFlYmYyNTExMTI5NTIyOTg0ZDMzN2UxYTBhN2NiZWZlZGMxOTVjOWQ2MGVhOTMyY2E5M2VhYmZkODI1YjBiMzU0ZDViYzUzMmM5YzI5NjA2ZWU3MmFmNGYwNGRkNTlhNDEzYzJiZmYyODllZjBkNWJlNWU5ZjZkZWVlMjk4MDUyMTU2OTQwNzE3ZDQ5M2NlM2E4YmIwN2YyZjE4MzgzZmEwNjQxNGZlYmFlNzdmN2QwNTZlYTQ3NDEwMmNlZjU1YmZhNjNjMDM2MmI5OTU2NjBkZjg4YzFjYzA2MmY0NjU2OTE0ZGIwMWE3ODQxNjA2YjdlZWE3ZDJjZTM4NjE5YTcwYjg0MmVkZTBmM2Y1MzI3ZGI2YmU5M2ZjYTNiMzg4OTJkOGQ3NWI4Y2M4YjQ3NjBkNDExZmQ3ZmFlNGIxY2YwMGE5ZDk2MmM2ZDYzMWE1YmRjNmYzMmU0Y2U5MDYwOGNiMDMzMTlkZGE2ZDlkMGU4OGUwMzUwMDkwZTQ5MGRhMmY5ODU1MGU4ZmQ1ODc3NmQ0Yjg5MDM1Y2FiNTg3MjMyMGMwOTJmOTUyODkwYmQ3YjIwYTMzODI5Y2MwY2VlZTE0MWY5N2FiN2IzYmJjNDg3MWM0M2E3ZTViYWNjZWZiZjg4MjM1ZDRiNWMzMjBjM2IxNGM2ZWE2NWVkZjc0OWI0ZDNlNzZjOWYyMTkwZDM0ZTVkYTZkNjM1NjFmZWNmMWYyODIxMTMyNjIyOGFjMWU0MTA2NjY1OWQ4Y2JlZTRmMjIwYzI2NjNmNzYxYzBhZGEyY2VkZjkyNDkzZWExNzFhN2NhZThiNTMxNDNmNzEzM2RhY2UyOWNmYjQ4ZTk5YzE2YjcyM2ZmZTJjZDk5MjU0NGM5OWNhOTFlMDRlMWNiNTQ5ZjU4MGQxY2I4YWU5MWU0MDlmZDZmYjhjNGYzYTRmODA2ZWFiZjRlMDI3OWJmOTM4NmQwN2I5MTBmYzlkYzNjMGM2ODIzYjg4OWFjNWZkZjBhYWNjYzNhYmU0MDRmMTg3Y2Q0MGNmMjcyNWFmY2VkYzAzYmVjZGY2MmMzNWRkNzQ5MGExYjQ1MDdlNTczNDI1OTliYTJhMjNmM2FmNDg1NGM3ODZkYzBiZWIzYTllMGEwYWUyMTllNmZhNzYyN2YyNTI5ZDc3YzQ3MGY1YzIxNzI1NzhhM2EwYzM3NzM0NTM4MTlhYjE3ODJiNmRmOGM1NTI2YjQzZj
UzNTZlNDVhM2Q5MDc4N2IwZGNkZTdmYmYzM2ZkMWQ2NGY2NjdmOWYzNDIzZjJkMmU2NzgyMTY5ZWM3MTE1Y2E3MDdlYWRhOGJmNzI0OTJmMGM3Y2QxNjJjMDI4NmFjOThmNDhmOWEyYWQzZDAwYzg5YmViYzA3NTA4ZjYwYzE1OGVmYjk5ZjBkOGY4MzQ1ODI5Yzg4Yzc0YTA3OGQyZjU5NTFjNmQzNTc1N2QyNjI0NWVjNTk0Y2JkMzc2YmVhMGNiZmEzMWYwZTA5MGRhYzhlYzNlYjQ0ZGIxN2M4MWE5NWY4MTE4MDAwNDJkMjQ2MmMzMjk2ODU5Yjg3ZjRhZmI1MDYxM2MxY2FiYTZkZDI0ODdiZDQ3MmVmNzBjMzFkN2YwNjZmZTMxOThiYzFhOWFlZjIwZTQzY2FlNDBkMDkxZWEzMmNiYTBhNDM0YmQ2ZDU2NDQ3YTU4YTNjODZjYTk0NjQ3MGNiZjM4ZjM3ZjU2YTZkZmQ4MDY0OWEyZGU3MzllN2EyZWE3M2RlNDE5NDljNmI4ODU2YmE5ZTM4Njc2YmRhNzA1MWE5MjlmMWU1YTczZjEwYTg2ZjgwNDJjZDQxZTMwYjVjMTA1ODYzNzlhMGY3NmRlOWExODZiZmU2N2Y5NzZhOTY3MTg0ZjNkYmFhYWU0YjdmNmFlMjM5MTlkNDljNDNiODc4MzRjMjA0MzY4YThkOGEyYzRkNjc3MzhkMTU0NmFiNTVjMWE0YTQ0Y2M3MzE5OGM4Y2YzOTAxZGI0ZGY1MzFmNGY5NTI4MDE5MjZjN2I2MDg1YjQzODI0YmFiMTQ3NTIxZTYwNWQzYzhmZjljYjNmOTRlNzg3MDJiYzc1MzE4NTRhN2M3ZDE2OWQyMzcyYjUzMDBhNGQzNzhhYWNjOTk3ZDM1ZTZjODYwZGQwMWNlYTMwZjU1YTFlMjQxMTMxMTQwZjQwMWJmZGJkNWU3NzA4OWE5YzljNDIzY2E2ODk3OGE2ODMwYWEzYTlkZGJiZmMyYTE3NGZhOTc4NmI3ZTYyYmIzNTZlNjRiMzBiYzI4ZDMyYTVjMDMxYzgxZjZlOGEyMGMwNWFlNjJlYWM2ZWExNDY5OTFiZjk1Yzc4NzQzMjMwYTIyNzk1MWRlMzI4NjFjYjU5ZGQ3N2QxOWQ5MTMxNDgwYmY2ZTgyYTkwNzgwMTBlYjAzMzIzYjcxNGY0NzM5NDNmY2MwNTM3ODJmOTIwMGFkNzlmNzZiNjkxNDdmZGQwOTdhZTUwMTk1YjE4M2Q2YWM5NjVmN2NkNDNhMGI3MTEwOTNkZTM5NGM3OTYwNjNlNTBhMDAyNzNkOTE2MzQzODY2MzFkZThkMzViYTUxNmI4MTIyZWZjNzE5MTU0OTQ2NTIyYzc0YjhmNTY2OTMwZDM3YmIwZjJkM2Q4ODgyZGQwZTU0YTcyODM1NmYyZDk2ZWVlNzZiYmZlYjI1YTFjM2ZhNTg5OGY5OTM0YTc4NTBjYzRlNjY4NjE5YWMzOTg2MmE5NDhjMDVhMTc0MzE0MjIwOGFhMjk5OGY2ZmIwMmZlZWI2YTk0M2Q1NzcyN2JhZWU4ZmY5NGFmZjgzZGVjMTUyZmYxOWVkYmM1Y2RiZDkzYzBiNDc1OTEzMjFhYTY4MjI1MDA4ODhmYWJhMzAzNjdlZmRjYmJjNzhjYzE5MWI1MDViNTlmMjBhY2RiYTYzMzQyYzE1YTI2M2NiOGE1NDQ3NzQ4ODU3YWYxMzllMDJlMzY0ODlkNjRlNTRiMTc5YTgwOGRmMWU5YTk1ODY2YzE2YTYzM2EyZmUyYjA2MzM4OTI5YTc4MmRlMGFkZDgwZDZiYWU3Y2M1ZjljMWEzYzA5MGU4MTVlNjc2MGJjMzA0ZWU3ZmY1MDM5OGRiNDc0YTJkNWMzYWVhNTMxZjc0ZDU3NGNhZGNhZTIzZmZiZjcyY2FhNmU5YTNjNjFhYzNiMDJjNDdjYzQzZGJhYjA2NTgwNTkyZmE5YjMyNGMxMGJhMG
RjNjgzZWIyYzRiNDg4NzFiMjk2YmIxNDBhMWUyZWRlOTE0NmY3MThkZTE4ZWU0M2QwZTk4NWY3NWQ1YWYyYjlkNjU5ODM5YzQwZWFiMzg2"
+ """星火请求中的 GtToken 字段"""
+ sid: Optional[str] = ""
+ """星火请求中的 sid 字段"""
proxy: Optional[str] = None
"""可选的代理地址,留空则检测系统代理"""
@@ -204,6 +242,11 @@ class YiyanAuths(BaseModel):
"""文心一言的账号列表"""
+class XinghuoAuths(BaseModel):
+ accounts: List[XinghuoCookiePath] = []
+ """讯飞星火大模型的账号列表"""
+
+
class ChatGLMAPI(BaseModel):
api_endpoint: str
"""自定义 ChatGLM API 的接入点"""
@@ -218,6 +261,25 @@ class ChatGLMAuths(BaseModel):
"""ChatGLM的账号列表"""
+class SlackAppAccessToken(BaseModel):
+ channel_id: str
+ """负责与机器人交互的 Channel ID"""
+
+ access_token: str
+ """安装 Slack App 时获得的 access_token"""
+
+ proxy: Optional[str] = None
+ """可选的代理地址,留空则检测系统代理"""
+
+ app_endpoint: str = "https://chatgpt-proxy.lss233.com/claude-in-slack/backend-api/"
+ """API 的接入点"""
+
+
+class SlackAuths(BaseModel):
+ accounts: List[SlackAppAccessToken] = []
+ """Slack App 账号信息"""
+
+
class TextToImage(BaseModel):
always: bool = False
"""强制开启,设置后所有的会话强制以图片发送"""
@@ -241,8 +303,10 @@ class TextToSpeech(BaseModel):
"""设置后所有的会话都会转语音再发一次"""
engine: str = "azure"
"""文字转语音引擎选择,当前有azure和vits"""
- default: str = "zh-CN-XiaoyanNeural"
+ default: str = "zh-CN-XiaoxiaoNeural"
"""默认设置为Azure语音音色"""
+ default_voice_prefix: List[str] = ["zh-CN", "zh-TW"]
+ """默认的提示音色前缀"""
class AzureConfig(BaseModel):
@@ -322,19 +386,21 @@ class Response(BaseModel):
error_format: str = "出现故障!如果这个问题持续出现,请和我说“重置会话” 来开启一段新的会话,或者发送 “回滚对话” 来回溯到上一条对话,你上一条说的我就当作没看见。\n原因:{exc}"
"""发生错误时发送的消息,请注意可以插入 {exc} 作为异常占位符"""
- error_network_failure: str = "网络故障!连接 OpenAI 服务器失败,我需要更好的网络才能服务!\n{exc}"
+ error_network_failure: str = "网络故障!连接服务器失败,我需要更好的网络才能服务!\n{exc}"
"""发生网络错误时发送的消息,请注意可以插入 {exc} 作为异常占位符"""
error_session_authenciate_failed: str = "身份验证失败!无法登录至 ChatGPT 服务器,请检查账号信息是否正确!\n{exc}"
"""发生网络错误时发送的消息,请注意可以插入 {exc} 作为异常占位符"""
- error_request_too_many: str = "糟糕!当前收到的请求太多了,我需要一段时间冷静冷静。你可以选择“重置会话”,或者过一会儿再来找我!\n预计恢复时间:{exc}\n"
+ error_request_too_many: str = "糟糕!当前 ChatGPT 接入点收到的请求太多了,我需要一段时间冷静冷静。请过一会儿再来找我!\n预计恢复时间:{exc}(Code: 429)\n"
error_request_concurrent_error: str = "当前有其他人正在和我进行聊天,请稍后再给我发消息吧!"
error_server_overloaded: str = "抱歉,当前服务器压力有点大,请稍后再找我吧!"
"""服务器提示 429 错误时的回复 """
+ error_drawing: str = "画图失败!原因: {exc}"
+
placeholder: str = (
"您好!我是 Assistant,一个由 OpenAI 训练的大型语言模型。我不是真正的人,而是一个计算机程序,可以通过文本聊天来帮助您解决问题。如果您有任何问题,请随时告诉我,我将尽力回答。\n"
"如果您需要重置我们的会话,请回复`重置会话`。"
@@ -377,8 +443,11 @@ class Response(BaseModel):
queued_notice: str = "消息已收到!当前我还有{queue_size}条消息要回复,请您稍等。"
"""新消息进入队列时,发送的通知。 queue_size 是当前排队的消息数"""
- ping_response: str = "当前AI:{current_ai}\n当前可用AI(输入此命令切换:切换AI XXX):\n{supported_ai}"
+ ping_response: str = "当前AI:{current_ai} / 当前语音:{current_voice}\n指令:\n切换AI XXX / 切换语音 XXX" \
+ "\n\n可用AI:\n{supported_ai}"
"""ping返回内容"""
+ ping_tts_response: str = "\n可用语音:\n{supported_tts}"
+ """ping tts 返回"""
class System(BaseModel):
@@ -388,6 +457,9 @@ class System(BaseModel):
accept_friend_request: bool = False
"""自动接收好友请求"""
+ auto_reset_timeout_seconds: int = 8 * 3600
+ """会话闲置多长时间后会重置, -1 不重置"""
+
class BaiduCloud(BaseModel):
check: bool = False
@@ -441,16 +513,18 @@ class SDWebUI(BaseModel):
seed: int = -1
batch_size: int = 1
n_iter: int = 1
- cfg_scale: float = 0.75
+ cfg_scale: float = 7.5
restore_faces: bool = False
+ authorization: str = ''
+ """登录api的账号:密码"""
timeout: float = 10.0
"""超时时间"""
+
class Config(BaseConfig):
extra = Extra.allow
-
class Config(BaseModel):
# === Platform Settings ===
onebot: Optional[Onebot] = None
@@ -458,6 +532,7 @@ class Config(BaseModel):
telegram: Optional[TelegramBot] = None
discord: Optional[DiscordBot] = None
http: Optional[HttpService] = None
+ wecom: Optional[WecomBot] = None
# === Account Settings ===
openai: OpenAIAuths = OpenAIAuths()
@@ -467,6 +542,8 @@ class Config(BaseModel):
yiyan: YiyanAuths = YiyanAuths()
chatglm: ChatGLMAuths = ChatGLMAuths()
poe: PoeAuths = PoeAuths()
+ slack: SlackAuths = SlackAuths()
+ xinghuo: XinghuoAuths = XinghuoAuths()
# === Response Settings ===
text_to_image: TextToImage = TextToImage()
@@ -538,8 +615,8 @@ def load_config() -> Config:
return Config.parse_obj(toml.loads(env_config))
try:
if (
- not os.path.exists('config.cfg')
- or os.path.getsize('config.cfg') <= 0
+ not os.path.exists('config.cfg')
+ or os.path.getsize('config.cfg') <= 0
) and os.path.exists('config.json'):
logger.info("正在转换旧版配置文件……")
Config.save_config(Config.__load_json_config())
diff --git a/constants.py b/constants.py
index 6912a6f1..9cbe58f3 100644
--- a/constants.py
+++ b/constants.py
@@ -10,12 +10,17 @@
class LlmName(Enum):
+ SlackClaude = "slack-claude"
PoeSage = "poe-sage"
PoeGPT4 = "poe-gpt4"
+ PoeGPT432k = "poe-gpt432k"
PoeClaude2 = "poe-claude2"
PoeClaude = "poe-claude"
+ PoeClaude100k = "poe-claude100k"
PoeChatGPT = "poe-chatgpt"
- PoeDragonfly = "poe-dragonfly"
+ PoeChatGPT16k = "poe-chatgpt16k"
+ PoeLlama2 = "poe-llama2"
+ PoePaLM = "poe-palm"
ChatGPT_Web = "chatgpt-web"
ChatGPT_Api = "chatgpt-api"
Bing = "bing"
@@ -25,3 +30,13 @@ class LlmName(Enum):
Bard = "bard"
YiYan = "yiyan"
ChatGLM = "chatglm-api"
+ XunfeiXinghuo = "xinghuo"
+
+
+class BotPlatform(Enum):
+ AriadneBot = "mirai"
+ DiscordBot = "discord"
+ Onebot = "onebot"
+ TelegramBot = "telegram"
+ HttpService = "http"
+ WecomBot = "wecom"
diff --git a/conversation.py b/conversation.py
index 94ccfecc..2f1bdebb 100644
--- a/conversation.py
+++ b/conversation.py
@@ -1,9 +1,11 @@
+import asyncio
import contextlib
+import time
from datetime import datetime
from typing import List, Dict, Optional
import httpx
-from EdgeGPT import ConversationStyle
+from EdgeGPT.EdgeGPT import ConversationStyle
from graia.amnesia.message import MessageChain
from graia.ariadne.message.element import Image as GraiaImage, Element
from loguru import logger
@@ -13,14 +15,16 @@
from adapter.botservice import BotAdapter
from adapter.chatgpt.api import ChatGPTAPIAdapter
from adapter.chatgpt.web import ChatGPTWebAdapter
+from adapter.claude.slack import ClaudeInSlackAdapter
from adapter.google.bard import BardAdapter
from adapter.ms.bing import BingAdapter
+from adapter.xunfei.xinghuo import XinghuoAdapter
from drawing import DrawingAPI, SDWebUI as SDDrawing, OpenAI as OpenAIDrawing
from adapter.quora.poe import PoeBot, PoeAdapter
from adapter.thudm.chatglm_6b import ChatGLM6BAdapter
from constants import config
from exceptions import PresetNotFoundException, BotTypeNotFoundException, NoAvailableBotException, \
- CommandRefusedException
+ CommandRefusedException, DrawingFailedException
from renderer import Renderer
from renderer.merger import BufferedContentMerger, LengthContentMerger
from renderer.renderer import MixedContentMessageChainRenderer, MarkdownImageRenderer, PlainTextRenderer
@@ -28,12 +32,13 @@
from middlewares.draw_ratelimit import MiddlewareRatelimit
from utils import retry
from constants import LlmName
+from utils.text_to_speech import TtsVoice, TtsVoiceManager
handlers = {}
-
middlewares = MiddlewareRatelimit()
+
class ConversationContext:
type: str
adapter: BotAdapter
@@ -54,7 +59,7 @@ class ConversationContext:
preset_decoration_format: Optional[str] = "{prompt}"
"""预设装饰文本"""
- conversation_voice: Optional[str] = None
+ conversation_voice: TtsVoice = None
"""语音音色"""
@property
@@ -70,11 +75,17 @@ def __init__(self, _type: str, session_id: str):
self.last_resp = ''
+ self.last_resp_time = -1
+
self.switch_renderer()
if config.text_to_speech.always:
- self.conversation_voice = config.text_to_speech.default
-
+ tts_engine = config.text_to_speech.engine
+ tts_voice = config.text_to_speech.default
+ try:
+ self.conversation_voice = TtsVoiceManager.parse_tts_voice(tts_engine, tts_voice)
+ except KeyError as e:
+ logger.error(f"Failed to load {tts_engine} tts voice setting -> {tts_voice}")
if _type == LlmName.ChatGPT_Web.value:
self.adapter = ChatGPTWebAdapter(self.session_id)
elif _type == LlmName.ChatGPT_Api.value:
@@ -95,6 +106,10 @@ def __init__(self, _type: str, session_id: str):
self.adapter = YiyanAdapter(self.session_id)
elif _type == LlmName.ChatGLM.value:
self.adapter = ChatGLM6BAdapter(self.session_id)
+ elif _type == LlmName.SlackClaude.value:
+ self.adapter = ClaudeInSlackAdapter(self.session_id)
+ elif _type == LlmName.XunfeiXinghuo.value:
+ self.adapter = XinghuoAdapter(self.session_id)
else:
raise BotTypeNotFoundException(_type)
self.type = _type
@@ -135,14 +150,18 @@ def switch_renderer(self, mode: Optional[str] = None):
async def reset(self):
await self.adapter.on_reset()
self.last_resp = ''
+ self.last_resp_time = -1
yield config.response.reset
@retry((httpx.ConnectError, httpx.ConnectTimeout, TimeoutError))
async def ask(self, prompt: str, chain: MessageChain = None, name: str = None):
+ await self.check_and_reset()
# 检查是否为 画图指令
for prefix in config.trigger.prefix_image:
if prompt.startswith(prefix) and not isinstance(self.adapter, YiyanAdapter):
+ # TODO(lss233): 此部分可合并至 RateLimitMiddleware
respond_str = middlewares.handle_draw_request(self.session_id, prompt)
+ # TODO(lss233): 这什么玩意
if respond_str != "1":
yield respond_str
return
@@ -150,12 +169,15 @@ async def ask(self, prompt: str, chain: MessageChain = None, name: str = None):
yield "未配置画图引擎,无法使用画图功能!"
return
prompt = prompt.removeprefix(prefix)
- if chain.has(GraiaImage):
- images = await self.drawing_adapter.img_to_img(chain.get(GraiaImage), prompt)
- else:
- images = await self.drawing_adapter.text_to_img(prompt)
- for i in images:
- yield i
+ try:
+ if chain.has(GraiaImage):
+ images = await self.drawing_adapter.img_to_img(chain.get(GraiaImage), prompt)
+ else:
+ images = await self.drawing_adapter.text_to_img(prompt)
+ for i in images:
+ yield i
+ except Exception as e:
+ raise DrawingFailedException from e
respond_str = middlewares.handle_draw_respond_completed(self.session_id, prompt)
if respond_str != "1":
yield respond_str
@@ -176,6 +198,7 @@ async def ask(self, prompt: str, chain: MessageChain = None, name: str = None):
else:
yield await self.renderer.render(item)
self.last_resp = item or ''
+ self.last_resp_time = int(time.time())
yield await self.renderer.result()
async def rollback(self):
@@ -206,8 +229,9 @@ async def load_preset(self, keyword: str):
continue
if role == 'voice':
- self.conversation_voice = text.strip()
- logger.debug(f"Set conversation voice to {self.conversation_voice}")
+ self.conversation_voice = TtsVoiceManager.parse_tts_voice(config.text_to_speech.engine,
+ text.strip())
+ logger.debug(f"Set conversation voice to {self.conversation_voice.full_name}")
continue
async for item in self.adapter.preset_ask(role=role.lower().strip(), text=text.strip()):
@@ -220,6 +244,15 @@ def delete_message(self, respond_msg):
# TODO: adapt to all platforms
pass
+ async def check_and_reset(self):
+ timeout_seconds = config.system.auto_reset_timeout_seconds
+ current_time = time.time()
+ if timeout_seconds == -1 or self.last_resp_time == -1 or current_time - self.last_resp_time < timeout_seconds:
+ return
+ logger.debug(f"Reset conversation({self.session_id}) after {current_time - self.last_resp_time} seconds.")
+ async for _resp in self.reset():
+ logger.debug(_resp)
+
class ConversationHandler:
"""
diff --git a/docker-compose.go-cqhttp.yaml b/docker-compose.go-cqhttp.yaml
index f0239e94..3a4675b0 100644
--- a/docker-compose.go-cqhttp.yaml
+++ b/docker-compose.go-cqhttp.yaml
@@ -1,7 +1,7 @@
version: '3.4'
services:
gocqhttp:
- image: ghcr.io/mrs4s/go-cqhttp:master
+ image: silicer/go-cqhttp:latest
restart: always
environment:
LANG: 'C.UTF-8'
diff --git a/drawing/sdwebui.py b/drawing/sdwebui.py
index 24b0011a..e07107db 100644
--- a/drawing/sdwebui.py
+++ b/drawing/sdwebui.py
@@ -1,5 +1,5 @@
from typing import List
-
+import base64
import httpx
from graia.ariadne.message.element import Image
@@ -7,8 +7,26 @@
from .base import DrawingAPI
+def basic_auth_encode(authorization: str) -> str:
+ authorization_bytes = authorization.encode('utf-8')
+ encoded_authorization = base64.b64encode(authorization_bytes).decode('utf-8')
+ return f"Basic {encoded_authorization}"
+
+
+def init_authorization():
+ if config.sdwebui.authorization != '':
+ return basic_auth_encode(config.sdwebui.authorization)
+ else:
+ return ''
+
+
class SDWebUI(DrawingAPI):
+ def __init__(self):
+ self.headers = {
+ "Authorization": f"{init_authorization()}"
+ }
+
async def text_to_img(self, prompt):
payload = {
'enable_hr': 'false',
@@ -33,13 +51,17 @@ async def text_to_img(self, prompt):
payload[key] = value
resp = await httpx.AsyncClient(timeout=config.sdwebui.timeout).post(f"{config.sdwebui.api_url}sdapi/v1/txt2img",
- json=payload)
+ json=payload, headers=self.headers)
resp.raise_for_status()
r = resp.json()
return [Image(base64=i) for i in r.get('images', [])]
async def img_to_img(self, init_images: List[Image], prompt=''):
+ # 需要调用get_bytes方法,才能获取到base64字段内容
+ for x in init_images: await x.get_bytes()
+ # 消息链显示字符串中有“[图片]”字样,需要过滤
+ prompt = prompt.replace("[图片]", "")
payload = {
'init_images': [x.base64 for x in init_images],
'enable_hr': 'false',
@@ -65,7 +87,7 @@ async def img_to_img(self, init_images: List[Image], prompt=''):
payload[key] = value
resp = await httpx.AsyncClient(timeout=config.sdwebui.timeout).post(f"{config.sdwebui.api_url}sdapi/v1/img2img",
- json=payload)
+ json=payload, headers=self.headers)
resp.raise_for_status()
r = resp.json()
return [Image(base64=i) for i in r.get('images', [])]
diff --git a/exceptions/__init__.py b/exceptions/__init__.py
index 2215d378..88ed3ede 100644
--- a/exceptions/__init__.py
+++ b/exceptions/__init__.py
@@ -24,3 +24,8 @@ def __init__(self, estimated_at):
class APIKeyNoFundsError(Exception): ...
+
+
+class DrawingFailedException(Exception):
+ def __init__(self):
+ self.__cause__ = None
diff --git a/manager/bot.py b/manager/bot.py
index 36b81aa6..bf334714 100644
--- a/manager/bot.py
+++ b/manager/bot.py
@@ -1,12 +1,17 @@
+import asyncio
import hashlib
import itertools
import os
import urllib.request
from typing import List, Dict
from urllib.parse import urlparse
-
-import OpenAIAuth
+import re
+import base64
+import json
+import time
+import httpx
import openai
+import regex
import requests
import urllib3.exceptions
from aiohttp import ClientConnectorError
@@ -22,7 +27,7 @@
import utils.network as network
from chatbot.chatgpt import ChatGPTBrowserChatbot
from config import OpenAIAuthBase, OpenAIAPIKey, Config, BingCookiePath, BardCookiePath, YiyanCookiePath, ChatGLMAPI, \
- PoeCookieAuth
+ PoeCookieAuth, SlackAppAccessToken, XinghuoCookiePath
from exceptions import NoAvailableBotException, APIKeyNoFundsError
@@ -36,6 +41,8 @@ class BotManager:
"bing-cookie": [],
"bard-cookie": [],
"yiyan-cookie": [],
+ "xinghuo-cookie": [],
+ "slack-accesstoken": [],
}
"""Bot list"""
@@ -57,6 +64,12 @@ class BotManager:
chatglm: List[ChatGLMAPI]
"""chatglm Account Infos"""
+ slack: List[SlackAppAccessToken]
+ """Slack Account Infos"""
+
+ xinghuo: List[XinghuoCookiePath]
+ """Xinghuo Account Infos"""
+
roundrobin: Dict[str, itertools.cycle] = {}
def __init__(self, config: Config) -> None:
@@ -67,6 +80,8 @@ def __init__(self, config: Config) -> None:
self.poe = config.poe.accounts if config.poe else []
self.yiyan = config.yiyan.accounts if config.yiyan else []
self.chatglm = config.chatglm.accounts if config.chatglm else []
+ self.slack = config.slack.accounts if config.slack else []
+ self.xinghuo = config.xinghuo.accounts if config.xinghuo else []
try:
os.mkdir('data')
logger.warning(
@@ -75,6 +90,46 @@ def __init__(self, config: Config) -> None:
pass
self.cache_db = TinyDB('data/login_caches.json')
+ async def handle_openai(self):
+ # 考虑到有人会写错全局配置
+ for account in self.config.openai.accounts:
+ account = account.dict()
+ if 'browserless_endpoint' in account:
+ logger.warning("警告: browserless_endpoint 配置位置有误,正在将其调整为全局配置")
+ self.config.openai.browserless_endpoint = account['browserless_endpoint']
+ if 'api_endpoint' in account:
+ logger.warning("警告: api_endpoint 配置位置有误,正在将其调整为全局配置")
+ self.config.openai.api_endpoint = account['api_endpoint']
+
+ # 应用 browserless_endpoint 配置
+ if self.config.openai.browserless_endpoint:
+ V1.BASE_URL = self.config.openai.browserless_endpoint or V1.BASE_URL
+ logger.info(f"当前的 browserless_endpoint 为:{V1.BASE_URL}")
+
+ # 历史遗留问题 1
+ if V1.BASE_URL == 'https://bypass.duti.tech/api/':
+ logger.error("检测到你还在使用旧的 browserless_endpoint,已为您切换。")
+ V1.BASE_URL = "https://bypass.churchless.tech/api/"
+ # 历史遗留问题 2
+ if not V1.BASE_URL.endswith("api/"):
+ logger.warning(
+ f"提示:你可能要将 browserless_endpoint 修改为 \"{self.config.openai.browserless_endpoint}api/\"")
+
+ # 应用 api_endpoint 配置
+ if self.config.openai.api_endpoint:
+ openai.api_base = self.config.openai.api_endpoint or openai.api_base
+ if openai.api_base.endswith("/"):
+ openai.api_base.removesuffix("/")
+ logger.info(f"当前的 api_endpoint 为:{openai.api_base}")
+
+ pattern = r'^https://[^/]+/v1$'
+
+ if not re.match(pattern, openai.api_base):
+ logger.error("API反代地址填写错误,正确格式应为 'https://<网址>/v1'")
+
+ await self.login_openai()
+
+
async def login(self):
self.bots = {
"chatgpt-web": [],
@@ -83,79 +138,62 @@ async def login(self):
"bing-cookie": [],
"bard-cookie": [],
"yiyan-cookie": [],
+ "xinghuo-cookie": [],
"chatglm-api": [],
+ "slack-accesstoken": [],
}
+
self.__setup_system_proxy()
- if len(self.bing) > 0:
- self.login_bing()
- if len(self.poe) > 0:
- self.login_poe()
- if len(self.bard) > 0:
- self.login_bard()
- if len(self.openai) > 0:
-
- # 考虑到有人会写错全局配置
- for account in self.config.openai.accounts:
- account = account.dict()
- if 'browserless_endpoint' in account:
- logger.warning("警告: browserless_endpoint 配置位置有误,正在将其调整为全局配置")
- self.config.openai.browserless_endpoint = account['browserless_endpoint']
- if 'api_endpoint' in account:
- logger.warning("警告: api_endpoint 配置位置有误,正在将其调整为全局配置")
- self.config.openai.api_endpoint = account['api_endpoint']
-
- # 应用 browserless_endpoint 配置
- if self.config.openai.browserless_endpoint:
- V1.BASE_URL = self.config.openai.browserless_endpoint or V1.BASE_URL
- logger.info(f"当前的 browserless_endpoint 为:{V1.BASE_URL}")
-
- # 历史遗留问题 1
- if V1.BASE_URL == 'https://bypass.duti.tech/api/':
- logger.error("检测到你还在使用旧的 browserless_endpoint,已为您切换。")
- V1.BASE_URL = "https://bypass.churchless.tech/api/"
- # 历史遗留问题 2
- if not V1.BASE_URL.endswith("api/"):
- logger.warning(
- f"提示:你可能要将 browserless_endpoint 修改为 \"{self.config.openai.browserless_endpoint}api/\"")
-
- # 应用 api_endpoint 配置
- if self.config.openai.api_endpoint:
- openai.api_base = self.config.openai.api_endpoint or openai.api_base
- if openai.api_base.endswith("/"):
- openai.api_base.removesuffix("/")
- logger.info(f"当前的 api_endpoint 为:{openai.api_base}")
-
- await self.login_openai()
- if len(self.yiyan) > 0:
- self.login_yiyan()
- if len(self.chatglm) > 0:
- self.login_chatglm()
+
+ login_funcs = {
+ 'bing': self.login_bing,
+ 'poe': self.login_poe,
+ 'bard': self.login_bard,
+ 'slack': self.login_slack,
+ 'xinghuo': self.login_xinghuo,
+ 'openai': self.handle_openai,
+ 'yiyan': self.login_yiyan,
+ 'chatglm': self.login_chatglm
+ }
+
+ for key, login_func in login_funcs.items():
+ if hasattr(self, key) and len(getattr(self, key)) > 0:
+ if asyncio.iscoroutinefunction(login_func):
+ await login_func()
+ else:
+ login_func()
+
count = sum(len(v) for v in self.bots.values())
+
if count < 1:
logger.error("没有登录成功的账号,程序无法启动!")
exit(-2)
else:
- # 输出登录状况
for k, v in self.bots.items():
logger.info(f"AI 类型:{k} - 可用账号: {len(v)} 个")
- # 自动推测默认 AI
+
if not self.config.response.default_ai:
- if len(self.bots['poe-web']) > 0:
- self.config.response.default_ai = 'poe-chatgpt'
- elif len(self.bots['chatgpt-web']) > 0:
- self.config.response.default_ai = 'chatgpt-web'
- elif len(self.bots['openai-api']) > 0:
- self.config.response.default_ai = 'chatgpt-api'
- elif len(self.bots['bing-cookie']) > 0:
- self.config.response.default_ai = 'bing'
- elif len(self.bots['bard-cookie']) > 0:
- self.config.response.default_ai = 'bard'
- elif len(self.bots['yiyan-cookie']) > 0:
- self.config.response.default_ai = 'yiyan'
- elif len(self.bots['chatglm-api']) > 0:
- self.config.response.default_ai = 'chatglm-api'
- else:
- self.config.response.default_ai = 'chatgpt-web'
+ # 自动推测默认 AI
+ default_ai_mappings = {
+ "poe-web": "poe-chatgpt",
+ "slack-accesstoken": "slack-claude",
+ "chatgpt-web": "chatgpt-web",
+ "openai-api": "chatgpt-api",
+ "bing-cookie": "bing",
+ "bard-cookie": "bard",
+ "yiyan-cookie": "yiyan",
+ "chatglm-api": "chatglm-api",
+ "xinghuo-cookie": "xinghuo",
+ }
+
+ self.config.response.default_ai = next(
+ (
+ default_ai
+ for key, default_ai in default_ai_mappings.items()
+ if len(self.bots[key]) > 0
+ ),
+ 'chatgpt-web',
+ )
def reset_bot(self, bot):
from adapter.quora.poe import PoeClientWrapper
@@ -196,12 +234,8 @@ def login_bard(self):
logger.info("正在解析第 {i} 个 Bard 账号", i=i + 1)
if proxy := self.__check_proxy(account.proxy):
account.proxy = proxy
- try:
- self.bots["bard-cookie"].append(account)
- logger.success("解析成功!", i=i + 1)
- except Exception as e:
- logger.error("解析失败:")
- logger.exception(e)
+ self.bots["bard-cookie"].append(account)
+ logger.success("解析成功!", i=i + 1)
if len(self.bots) < 1:
logger.error("所有 Bard 账号均解析失败!")
logger.success(f"成功解析 {len(self.bots['bard-cookie'])}/{len(self.bing)} 个 Bard 账号!")
@@ -214,6 +248,36 @@ def poe_check_auth(self, client: PoeClient) -> bool:
except KeyError:
return False
+ def login_slack(self):
+ try:
+ for i, account in enumerate(self.slack):
+ logger.info("正在解析第 {i} 个 Claude (Slack) 账号", i=i + 1)
+ if proxy := self.__check_proxy(account.proxy):
+ account.proxy = proxy
+ self.bots["slack-accesstoken"].append(account)
+ logger.success("解析成功!", i=i + 1)
+ except Exception as e:
+ logger.error("解析失败:")
+ logger.exception(e)
+ if len(self.bots["slack-accesstoken"]) < 1:
+ logger.error("所有 Claude (Slack) 账号均解析失败!")
+ logger.success(f"成功解析 {len(self.bots['slack-accesstoken'])}/{len(self.slack)} 个 Claude (Slack) 账号!")
+
+ def login_xinghuo(self):
+ try:
+ for i, account in enumerate(self.xinghuo):
+ logger.info("正在解析第 {i} 个 讯飞星火 账号", i=i + 1)
+ if proxy := self.__check_proxy(account.proxy):
+ account.proxy = proxy
+ self.bots["xinghuo-cookie"].append(account)
+ logger.success("解析成功!", i=i + 1)
+ except Exception as e:
+ logger.error("解析失败:")
+ logger.exception(e)
+ if len(self.bots["xinghuo-cookie"]) < 1:
+ logger.error("所有 讯飞星火 账号均解析失败!")
+ logger.success(f"成功解析 {len(self.bots['xinghuo-cookie'])}/{len(self.xinghuo)} 个 讯飞星火 账号!")
+
def login_poe(self):
from adapter.quora.poe import PoeClientWrapper
try:
@@ -238,6 +302,16 @@ def login_yiyan(self):
if proxy := self.__check_proxy(account.proxy):
account.proxy = proxy
try:
+ if account.cookie_content:
+ logger.error("cookie_content 字段已弃用,请填写 BDUSS 和 BAIDUID!")
+ account.BDUSS = (regex.findall(r"BDUSS=(.*?);", account.cookie_content) or [None])[0]
+ account.BAIDUID = (regex.findall(r"BAIDUID=(.*?);", account.cookie_content) or [None])[0]
+ if not account.BAIDUID:
+ logger.error("未填写 BAIDUID,可能会有较高封号风险!")
+ if not account.BDUSS:
+ logger.error("未填写 BDUSS,无法使用!")
+ assert account.BDUSS
+
self.bots["yiyan-cookie"].append(account)
logger.success("解析成功!", i=i + 1)
except Exception as e:
@@ -279,9 +353,11 @@ async def login_openai(self): # sourcery skip: raise-specific-error
bot.account = account
logger.success("登录成功!", i=i + 1)
counter = counter + 1
- except OpenAIAuth.Error as e:
- logger.error("登录失败! 请检查 IP 、代理或者账号密码是否正确{exc}", exc=e)
- except (ConnectTimeout, RequestException, SSLError, urllib3.exceptions.MaxRetryError, ClientConnectorError) as e:
+ except httpx.HTTPStatusError as e:
+ logger.error("登录失败! 可能是账号密码错误,或者 Endpoint 不支持 该登录方式。{exc}", exc=e)
+ except (
+ ConnectTimeout, RequestException, SSLError, urllib3.exceptions.MaxRetryError,
+ ClientConnectorError) as e:
logger.error("登录失败! 连接 OpenAI 服务器失败,请更换代理节点重试!{exc}", exc=e)
except APIKeyNoFundsError:
logger.error("登录失败! API 账号余额不足,无法继续使用。")
@@ -365,17 +441,43 @@ async def __login_V1(self, account: OpenAIAuthBase) -> ChatGPTBrowserChatbot:
if cached_account.get('model'): # Ready for backward-compatibility & forward-compatibility
config['model'] = cached_account.get('model')
+ def get_access_token():
+ return bot.session.headers.get('Authorization').removeprefix('Bearer ')
+
# 我承认这部分代码有点蠢
async def __V1_check_auth() -> bool:
try:
+ access_token = get_access_token()
+ _, payload, _ = access_token.split(".")
+
+ # Decode the payload using base64 decoding
+ payload_data = base64.urlsafe_b64decode(payload + "=" * ((4 - len(payload) % 4) % 4))
+
+ # Parse the JSON string to get the payload as a dictionary
+ payload_dict = json.loads(payload_data)
+
+ # Check the "exp" key in the payload dictionary to get the expiration time
+ exp_time = payload_dict["exp"]
+ email = payload_dict["https://api.openai.com/profile"]['email']
+
+ # Convert the expiration time to a Unix timestamp
+ exp_timestamp = int(exp_time)
+
+ # Compare the current time (also in Unix timestamp format) to the expiration time to check if the token has expired
+ current_timestamp = int(time.time())
+ if current_timestamp >= exp_timestamp:
+ logger.error(f"[ChatGPT-Web] - {email} 的 access_token 已过期")
+ return False
+ else:
+ remaining_seconds = exp_timestamp - current_timestamp
+ remaining_days = remaining_seconds // (24 * 60 * 60)
+ logger.info(f"[ChatGPT-Web] - {email} 的 access_token 还有 {remaining_days} 天过期")
await bot.get_conversations(0, 1)
return True
- except (V1Error, KeyError):
+ except (V1Error, KeyError) as e:
+ logger.error(e)
return False
- def get_access_token():
- return bot.session.headers.get('Authorization').removeprefix('Bearer ')
-
if cached_account.get('access_token'):
logger.info("尝试使用 access_token 登录中...")
config['access_token'] = cached_account.get('access_token')
@@ -397,18 +499,26 @@ def get_access_token():
if cached_account.get('password'):
logger.info("尝试使用 email + password 登录中...")
- logger.warning("警告:该方法已不推荐使用,建议使用 access_token 登录。")
config.pop('access_token', None)
config.pop('session_token', None)
config['email'] = cached_account.get('email')
config['password'] = cached_account.get('password')
- bot = V1Chatbot(config=config)
- self.__save_login_cache(account=account, cache={
- "session_token": bot.config.get('session_token'),
- "access_token": get_access_token()
- })
- if await __V1_check_auth():
- return ChatGPTBrowserChatbot(bot, account.mode)
+ async with httpx.AsyncClient(proxies=config.get('proxy', None), timeout=60, trust_env=True) as client:
+ resp = await client.post(
+ url=f"{V1.BASE_URL}login",
+ json={
+ "username": config['email'],
+ "password": config['password'],
+ },
+ )
+ resp.raise_for_status()
+ config['access_token'] = resp.json().get('accessToken')
+ self.__save_login_cache(account=account, cache={
+ "access_token": config['access_token']
+ })
+ bot = V1Chatbot(config=config)
+ if await __V1_check_auth():
+ return ChatGPTBrowserChatbot(bot, account.mode)
# Invalidate cache
self.__save_login_cache(account=account, cache={})
raise Exception("All login method failed")
@@ -424,12 +534,12 @@ async def __login_openai_apikey(self, account):
logger.warning("在查询 API 额度时遇到问题,请自行确认额度。")
return account
- def pick(self, type: str):
- if type not in self.roundrobin:
- self.roundrobin[type] = itertools.cycle(self.bots[type])
- if len(self.bots[type]) == 0:
- raise NoAvailableBotException(type)
- return next(self.roundrobin[type])
+ def pick(self, llm: str):
+ if llm not in self.roundrobin:
+ self.roundrobin[llm] = itertools.cycle(self.bots[llm])
+ if len(self.bots[llm]) == 0:
+ raise NoAvailableBotException(llm)
+ return next(self.roundrobin[llm])
def bots_info(self):
from constants import LlmName
@@ -450,6 +560,17 @@ def bots_info(self):
bot_info += f"* {LlmName.ChatGLM.value} : 清华 ChatGLM-6B (本地)\n"
if len(self.bots['poe-web']) > 0:
bot_info += f"* {LlmName.PoeSage.value} : POE Sage 模型\n"
+ bot_info += f"* {LlmName.PoeGPT4.value} : POE ChatGPT4 模型\n"
+ bot_info += f"* {LlmName.PoeGPT432k.value} : POE ChatGPT4 32k 模型\n"
bot_info += f"* {LlmName.PoeClaude.value} : POE Claude 模型\n"
+ bot_info += f"* {LlmName.PoeClaude2.value} : POE Claude+ 模型\n"
+ bot_info += f"* {LlmName.PoeClaude100k.value} : POE Claude 100k 模型\n"
bot_info += f"* {LlmName.PoeChatGPT.value} : POE ChatGPT 模型\n"
+ bot_info += f"* {LlmName.PoeChatGPT16k.value} : POE ChatGPT 16k 模型\n"
+ bot_info += f"* {LlmName.PoeLlama2.value} : POE Llama2 模型\n"
+ bot_info += f"* {LlmName.PoePaLM.value} : POE PaLM 模型\n"
+ if len(self.bots['slack-accesstoken']) > 0:
+ bot_info += f"* {LlmName.SlackClaude.value} : Slack Claude 模型\n"
+ if len(self.bots['xinghuo-cookie']) > 0:
+ bot_info += f"* {LlmName.XunfeiXinghuo.value} : 星火大模型\n"
return bot_info
diff --git a/middlewares/baiducloud.py b/middlewares/baiducloud.py
index a5cec5ce..3e404450 100644
--- a/middlewares/baiducloud.py
+++ b/middlewares/baiducloud.py
@@ -64,6 +64,8 @@ async def check_and_update_access_token(self):
await self.get_access_token()
async def get_conclusion(self, text: str):
+ await self.check_and_update_access_token()
+
baidu_url = f"https://aip.baidubce.com/rest/2.0/solution/v1/text_censor/v2/user_defined" \
f"?access_token={self.access_token}"
headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Accept': 'application/json'}
@@ -108,15 +110,17 @@ async def handle_respond(self, session_id: str, prompt: str, rendered: str, resp
conclusion = f"{config.baiducloud.prompt_message}\n原因:{msg}"
return await action(session_id, prompt, conclusion, respond)
- except aiohttp.ClientError as e:
- logger.error(f"HTTP error occurred: {e}")
-
- await respond("[百度云文本审核] 判定出错\n以下是原消息:")
+ except Exception as e:
+ respond_message = "[百度云文本审核] 判定出错\n以下是原消息:"
+ if isinstance(e, aiohttp.ClientError):
+ error_message = f"[百度云文本审核] HTTP错误: {e}"
+ elif isinstance(e, json.JSONDecodeError):
+ error_message = f"[百度云文本审核] JSON解码错误: {e}"
+ else:
+ error_message = f"[百度云文本审核] 其他错误:{e}"
+ logger.error(error_message)
+ await respond(respond_message)
should_pass = True
- except json.JSONDecodeError as e:
- logger.error(f"[百度云文本审核] JSON decode error occurred: {e}")
- except StopIteration as e:
- logger.error(f"[百度云文本审核] StopIteration exception occurred: {e}")
if should_pass:
return await action(session_id, prompt, rendered, respond)
diff --git a/middlewares/draw_ratelimit.py b/middlewares/draw_ratelimit.py
index e8c85a61..50783ab0 100644
--- a/middlewares/draw_ratelimit.py
+++ b/middlewares/draw_ratelimit.py
@@ -11,15 +11,10 @@ class MiddlewareRatelimit():
def __init__(self):
...
-
def handle_draw_request(self, session_id: str, prompt: str):
_id = session_id.split('-', 1)[1] if '-' in session_id else session_id
rate_usage = manager.check_draw_exceed('好友' if session_id.startswith("friend-") else '群组', _id)
- if rate_usage >= 1:
- return config.ratelimit.draw_exceed
- return "1"
-
-
+ return config.ratelimit.draw_exceed if rate_usage >= 1 else "1"
def handle_draw_respond_completed(self, session_id: str, prompt: str):
key = '好友' if session_id.startswith("friend-") else '群组'
@@ -31,6 +26,6 @@ def handle_draw_respond_completed(self, session_id: str, prompt: str):
usage = manager.get_draw_usage(key, msg_id)
current_time = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(time.time()))
return config.ratelimit.draw_warning_msg.format(usage=usage['count'],
- limit=limit['rate'],
- current_time=current_time)
+ limit=limit['rate'],
+ current_time=current_time)
return "1"
diff --git a/middlewares/middlewares_loader.py b/middlewares/middlewares_loader.py
new file mode 100644
index 00000000..05a0da28
--- /dev/null
+++ b/middlewares/middlewares_loader.py
@@ -0,0 +1,25 @@
+import os
+import inspect
+import importlib
+from loguru import logger
+
+def load_middlewares(middlewares_dir='./middlewares'):
+ middlewares = []
+
+ # 遍历middlewares目录下的所有.py文件
+ for filename in os.listdir(middlewares_dir):
+ if filename.endswith('.py') and filename != '__init__.py':
+ module_name = filename[:-3] # 去掉.py后缀
+ module = importlib.import_module(f'middlewares.{module_name}')
+
+ # 遍历模块中的所有类
+ for name, obj in inspect.getmembers(module):
+ if inspect.isclass(obj):
+ if 'middleware' in name.lower() and name != 'Middleware':
+ # 检查类是否有指定的方法
+ methods = ['handle_request', 'handle_respond', 'on_respond', 'handle_respond_completed']
+ if any(hasattr(obj, method) for method in methods):
+ logger.debug(f'加载中间件 {name}')
+ middlewares.append(obj())
+
+ return middlewares
diff --git a/middlewares/timeout.py b/middlewares/timeout.py
index be25a98b..36b57235 100644
--- a/middlewares/timeout.py
+++ b/middlewares/timeout.py
@@ -32,6 +32,12 @@ async def handle_request(self, session_id: str, prompt: str, respond: Callable,
del self.timeout_task[session_id]
except asyncio.TimeoutError:
await respond(config.response.cancel_wait_too_long)
+ except Exception as e:
+ logger.error(f"发生错误: {e}")
+ if session_id in self.timeout_task:
+ self.timeout_task[session_id].cancel()
+ del self.timeout_task[session_id]
+ raise e
async def on_respond(self, session_id: str, prompt: str, rendered: str):
if rendered and session_id in self.timeout_task:
diff --git a/platforms/ariadne_bot.py b/platforms/ariadne_bot.py
index 923eaefe..0aa0d9cc 100644
--- a/platforms/ariadne_bot.py
+++ b/platforms/ariadne_bot.py
@@ -250,7 +250,6 @@ async def update_rate(app: Ariadne, event: MessageEvent, sender: Union[Friend, M
raise ExecutionStop()
-
@cmd.command(".查看 {msg_type: str} {msg_id: str} 的使用情况")
async def show_rate(app: Ariadne, event: MessageEvent, msg_type: str, msg_id: str):
try:
@@ -291,7 +290,6 @@ async def show_rate(app: Ariadne, event: MessageEvent, msg_type: str, msg_id: st
raise ExecutionStop()
-
@cmd.command(".预设列表")
async def presets_list(app: Ariadne, event: MessageEvent, sender: Union[Friend, Member]):
try:
diff --git a/platforms/discord_bot.py b/platforms/discord_bot.py
index 84b3df7e..772922a6 100644
--- a/platforms/discord_bot.py
+++ b/platforms/discord_bot.py
@@ -11,7 +11,7 @@
sys.path.append(os.getcwd())
-from constants import config
+from constants import config, BotPlatform
intents = discord.Intents.default()
intents.typing = False
@@ -64,7 +64,7 @@ async def response(msg):
await handle_message(response,
f"{'friend' if isinstance(message.channel, discord.DMChannel) else 'group'}-{message.channel.id}",
message.content.replace(f"<@{bot_id}>", "").strip(), is_manager=False,
- nickname=message.author.name)
+ nickname=message.author.name, request_from=BotPlatform.DiscordBot)
@bot.event
async def on_message(message):
diff --git a/platforms/http_service.py b/platforms/http_service.py
index b7a74e3c..8c43e2ff 100644
--- a/platforms/http_service.py
+++ b/platforms/http_service.py
@@ -9,7 +9,7 @@
from loguru import logger
from quart import Quart, request
-from constants import config
+from constants import config, BotPlatform
from universal import handle_message
app = Quart(__name__)
@@ -107,7 +107,8 @@ async def response(msg):
response,
bot_request.session_id,
bot_request.message,
- nickname=bot_request.username
+ nickname=bot_request.username,
+ request_from=BotPlatform.HttpService
)
bot_request.set_result_status(RESPONSE_DONE)
bot_request.done = True
@@ -147,7 +148,7 @@ async def v2_chat_response():
request_dic.pop(request_id)
else:
bot_request.result.pop_all()
- logger.debug(f"Bot request {request_id} response -> \n{response}")
+ logger.debug(f"Bot request {request_id} response -> \n{response[:100]}")
return response
diff --git a/platforms/onebot_bot.py b/platforms/onebot_bot.py
index 1ae4d10f..ec1a3fa8 100644
--- a/platforms/onebot_bot.py
+++ b/platforms/onebot_bot.py
@@ -1,11 +1,13 @@
import re
import time
+from base64 import b64decode, b64encode
from typing import Union, Optional
+import aiohttp
from aiocqhttp import CQHttp, Event, MessageSegment
from charset_normalizer import from_bytes
from graia.ariadne.message.chain import MessageChain
-from graia.ariadne.message.element import Image, At, Plain, Voice
+from graia.ariadne.message.element import Image as GraiaImage, At, Plain, Voice
from graia.ariadne.message.parser.base import DetectPrefix
from graia.broadcast import ExecutionStop
from loguru import logger
@@ -27,15 +29,37 @@ def __init__(self, name: Union[bool, str] = True) -> None:
async def __call__(self, chain: MessageChain, event: Event) -> Optional[MessageChain]:
first = chain[0]
- if isinstance(first, At) and first.target == config.onebot.qq:
+ if isinstance(first, At) and first.target == event.self_id:
return MessageChain(chain.__root__[1:], inline=True).removeprefix(" ")
elif isinstance(first, Plain):
- member_info = await bot.get_group_member_info(group_id=event.group_id, user_id=config.onebot.qq)
+ member_info = await bot.get_group_member_info(group_id=event.group_id, user_id=event.self_id)
if member_info.get("nickname") and chain.startswith(member_info.get("nickname")):
return chain.removeprefix(" ")
raise ExecutionStop
+class Image(GraiaImage):
+ async def get_bytes(self) -> bytes:
+ """尝试获取消息元素的 bytes, 注意, 你无法获取并不包含 url 且不包含 base64 属性的本元素的 bytes.
+
+ Raises:
+ ValueError: 你尝试获取并不包含 url 属性的本元素的 bytes.
+
+ Returns:
+ bytes: 元素原始数据
+ """
+ if self.base64:
+ return b64decode(self.base64)
+ if not self.url:
+ raise ValueError("you should offer a url.")
+ async with aiohttp.ClientSession() as session:
+ async with session.get(self.url) as response:
+ response.raise_for_status()
+ data = await response.read()
+ self.base64 = b64encode(data).decode("ascii")
+ return data
+
+
# TODO: use MessageSegment
# https://github.com/nonebot/aiocqhttp/blob/master/docs/common-topics.md
def transform_message_chain(text: str) -> MessageChain:
@@ -59,8 +83,11 @@ def transform_message_chain(text: str) -> MessageChain:
if text_segment and not text_segment.startswith('[CQ:reply,'):
messages.append(Plain(text_segment))
if cq_type == "at":
+ if params.get('qq') == 'all':
+ continue
params["target"] = int(params.pop("qq"))
- messages.append(message_class(**params))
+ elem = message_class(**params)
+ messages.append(elem)
start = match.end()
if text_segment := text[start:]:
messages.append(Plain(text_segment))
@@ -71,7 +98,7 @@ def transform_message_chain(text: str) -> MessageChain:
def transform_from_message_chain(chain: MessageChain):
result = ''
for elem in chain:
- if isinstance(elem, Image):
+ if isinstance(elem, (Image, GraiaImage)):
result = result + MessageSegment.image(f"base64://{elem.base64}")
elif isinstance(elem, Plain):
result = result + MessageSegment.text(str(elem))
@@ -127,10 +154,11 @@ async def _(event: Event):
msg.display,
chain,
is_manager=event.user_id == config.onebot.manager_qq,
- nickname=event.sender.get("nickname", "好友")
+ nickname=event.sender.get("nickname", "好友"),
+ request_from=constants.BotPlatform.Onebot
)
except Exception as e:
- print(e)
+ logger.exception(e)
GroupTrigger = [MentionMe(config.trigger.require_mention != "at"), DetectPrefix(
@@ -157,7 +185,8 @@ async def _(event: Event):
f"group-{event.group_id}",
chain.display,
is_manager=event.user_id == config.onebot.manager_qq,
- nickname=event.sender.get("nickname", "群友")
+ nickname=event.sender.get("nickname", "群友"),
+ request_from=constants.BotPlatform.Onebot
)
diff --git a/platforms/telegram_bot.py b/platforms/telegram_bot.py
index 1da87245..bfd4c973 100644
--- a/platforms/telegram_bot.py
+++ b/platforms/telegram_bot.py
@@ -8,7 +8,7 @@
from telegram.request import HTTPXRequest
from middlewares.ratelimit import manager as ratelimit_manager
-from constants import config
+from constants import config, BotPlatform
from universal import handle_message
@@ -50,7 +50,8 @@ async def response(msg):
f"{type}-{update.message.chat.id}",
update.message.text.replace(f"@{bot_username}", '').strip(),
is_manager=update.message.from_user.id == config.telegram.manager_chat,
- nickname=update.message.from_user.full_name or "群友"
+ nickname=update.message.from_user.full_name or "群友",
+ request_from=BotPlatform.TelegramBot
)
diff --git a/platforms/wecom_bot.py b/platforms/wecom_bot.py
new file mode 100644
index 00000000..4d5801b4
--- /dev/null
+++ b/platforms/wecom_bot.py
@@ -0,0 +1,246 @@
+# -*- coding: utf-8 -*-
+import json
+import threading
+import time
+import asyncio
+import base64
+from io import BytesIO
+from loguru import logger
+from pydub import AudioSegment
+from quart import Quart, request, abort, make_response
+
+from graia.ariadne.message.chain import MessageChain
+from graia.ariadne.message.element import Image, Voice
+from graia.ariadne.message.element import Plain
+
+from wechatpy.work.crypto import WeChatCrypto
+from wechatpy.work.client import WeChatClient
+from wechatpy.exceptions import InvalidSignatureException
+from wechatpy.work.exceptions import InvalidCorpIdException
+from wechatpy.work import parse_message, create_reply
+
+import constants
+from constants import config
+from universal import handle_message
+
+CorpId = config.wecom.corp_id
+AgentId = config.wecom.agent_id
+Secret = config.wecom.secret
+TOKEN = config.wecom.token
+EncodingAESKey = config.wecom.encoding_aes_key
+crypto = WeChatCrypto(TOKEN, EncodingAESKey, CorpId)
+client = WeChatClient(CorpId, Secret)
+app = Quart(__name__)
+
+lock = threading.Lock()
+
+request_dic = {}
+
+RESPONSE_SUCCESS = "SUCCESS"
+RESPONSE_FAILED = "FAILED"
+RESPONSE_DONE = "DONE"
+
+
+class BotRequest:
+ def __init__(self, session_id, user_id, username, message, request_time):
+ self.session_id: str = session_id
+ self.user_id: str = user_id
+ self.username: str = username
+ self.message: str = message
+ self.result: ResponseResult = ResponseResult()
+ self.request_time = request_time
+ self.done: bool = False
+ """请求是否处理完毕"""
+
+ def set_result_status(self, result_status):
+ if not self.result:
+ self.result = ResponseResult()
+ self.result.result_status = result_status
+
+ def append_result(self, result_type, result):
+ with lock:
+ if result_type == "message":
+ self.result.message.append(result)
+ elif result_type == "voice":
+ self.result.voice.append(result)
+ elif result_type == "image":
+ self.result.image.append(result)
+
+
+class ResponseResult:
+ def __init__(self, message=None, voice=None, image=None, result_status=RESPONSE_SUCCESS):
+ self.result_status = result_status
+ self.message = self._ensure_list(message)
+ self.voice = self._ensure_list(voice)
+ self.image = self._ensure_list(image)
+
+ def _ensure_list(self, value):
+ if value is None:
+ return []
+ elif isinstance(value, list):
+ return value
+ else:
+ return [value]
+
+ def is_empty(self):
+ return not self.message and not self.voice and not self.image
+
+ def pop_all(self):
+ with lock:
+ self.message = []
+ self.voice = []
+ self.image = []
+
+ def to_json(self):
+ return json.dumps({
+ 'result': self.result_status,
+ 'message': self.message,
+ 'voice': self.voice,
+ 'image': self.image
+ })
+
+
+@app.route("/wechat", methods=["GET", "POST"])
+async def wechat():
+ signature = request.args.get("msg_signature", "")
+ timestamp = request.args.get("timestamp", "")
+ nonce = request.args.get("nonce", "")
+
+ if request.method == "GET":
+ echo_str = request.args.get("echostr", "")
+ try:
+ echo_str = crypto.check_signature(
+ signature, timestamp, nonce, echo_str)
+ except InvalidSignatureException:
+ abort(403)
+ return echo_str
+ else:
+ try:
+ msg = crypto.decrypt_message(await request.data, signature, timestamp, nonce)
+ except (InvalidSignatureException, InvalidCorpIdException):
+ abort(403)
+ msg = parse_message(msg)
+ logger.debug(msg)
+ if msg.type == "text":
+ reply = create_reply(msg.content, msg).render()
+
+ bot_request = construct_bot_request(msg)
+ asyncio.create_task(process_request(bot_request))
+ request_dic[bot_request.request_time] = bot_request
+
+ response = await make_response("ok")
+ response.status_code = 200
+ return response
+ else:
+ reply = create_reply("Can not handle this for now", msg).render()
+ return crypto.encrypt_message(reply, nonce, timestamp)
+
+
+async def reply(bot_request: BotRequest):
+ # client = WeChatClient(CorpId, Secret)
+ UserId = bot_request.user_id
+ response = bot_request.result.to_json()
+ if bot_request.done:
+ request_dic.pop(bot_request.request_time)
+ else:
+ bot_request.result.pop_all()
+ logger.debug(
+ f"Bot request {bot_request.request_time} response -> \n{response[:100]}")
+ if bot_request.result.message:
+ for msg in bot_request.result.message:
+ result = client.message.send_text(AgentId, UserId, msg)
+ logger.debug(f"Send message result -> {result}")
+ if bot_request.result.voice:
+ for voice in bot_request.result.voice:
+ # convert mp3 to amr
+ voice = convert_mp3_to_amr(voice)
+ voice_id = client.media.upload(
+ "voice", voice)["media_id"]
+ result = client.message.send_voice(AgentId, UserId, voice_id)
+ logger.debug(f"Send voice result -> {result}")
+ if bot_request.result.image:
+ for image in bot_request.result.image:
+ image_id = client.media.upload(
+ "image", BytesIO(base64.b64decode(image)))["media_id"]
+ result = client.message.send_image(AgentId, UserId, image_id)
+ logger.debug(f"Send image result -> {result}")
+
+
+def convert_mp3_to_amr(mp3):
+ mp3 = BytesIO(base64.b64decode(mp3))
+ amr = BytesIO()
+    AudioSegment.from_file(mp3, format="mp3").set_frame_rate(8000).set_channels(1).export(amr, format="amr", codec="libopencore_amrnb")
+ return amr
+
+
+def clear_request_dict():
+ logger.debug("Watch and clean request_dic.")
+ while True:
+ now = time.time()
+ keys_to_delete = []
+ for key, bot_request in request_dic.items():
+ if now - int(key)/1000 > 600:
+ logger.debug(f"Remove time out request -> {key}|{bot_request.session_id}|{bot_request.user_id}"
+ f"|{bot_request.message}")
+ keys_to_delete.append(key)
+ for key in keys_to_delete:
+ request_dic.pop(key)
+ time.sleep(60)
+
+
+def construct_bot_request(data):
+    session_id = f"wecom-{data.source}" if data.source else "wecom-default_session"
+ user_id = data.source
+ username = client.user.get(user_id) or "某人"
+ message = data.content
+ logger.info(f"Get message from {session_id}[{user_id}]:\n{message}")
+ with lock:
+ bot_request = BotRequest(session_id, user_id, username,
+ message, str(int(time.time() * 1000)))
+ return bot_request
+
+
+async def process_request(bot_request: BotRequest):
+ async def response(msg):
+ logger.info(f"Got response msg -> {type(msg)} -> {msg}")
+ _resp = msg
+ if not isinstance(msg, MessageChain):
+ _resp = MessageChain(msg)
+ for ele in _resp:
+ if isinstance(ele, Plain) and str(ele):
+ bot_request.append_result("message", str(ele))
+ elif isinstance(ele, Image):
+ bot_request.append_result(
+ "image", ele.base64)
+ elif isinstance(ele, Voice):
+ # mp3
+ bot_request.append_result(
+ "voice", ele.base64)
+ else:
+ logger.warning(
+ f"Unsupported message -> {type(ele)} -> {str(ele)}")
+ bot_request.append_result("message", str(ele))
+ logger.debug(f"Start to process bot request {bot_request.request_time}.")
+ if bot_request.message is None or not str(bot_request.message).strip():
+ await response("message 不能为空!")
+ bot_request.set_result_status(RESPONSE_FAILED)
+ else:
+ await handle_message(
+ response,
+ bot_request.session_id,
+ bot_request.message,
+ nickname=bot_request.username,
+ request_from=constants.BotPlatform.WecomBot
+ )
+ bot_request.set_result_status(RESPONSE_DONE)
+ bot_request.done = True
+ logger.debug(f"Bot request {bot_request.request_time} done.")
+ await reply(bot_request)
+
+
+async def start_task():
+ """|coro|
+ 以异步方式启动
+ """
+ threading.Thread(target=clear_request_dict).start()
+ return await app.run_task(host=config.wecom.host, port=config.wecom.port, debug=config.wecom.debug)
diff --git a/renderer/merger.py b/renderer/merger.py
index ab73f0fb..37aaa53f 100644
--- a/renderer/merger.py
+++ b/renderer/merger.py
@@ -31,8 +31,9 @@ async def render(self, msg: str) -> Optional[Any]:
rendered = await self.parent.render(msg)
if not rendered:
return None
- else:
- self.hold.append(Plain(rendered + '\n'))
+ if not self.hold:
+ self.hold = []
+ self.hold.append(Plain(rendered + '\n'))
if time_delta < config.response.buffer_delay:
return None
elif self.hold:
diff --git a/requirements.txt b/requirements.txt
index 72b537cc..b14aa4a7 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,38 +1,45 @@
-graia-ariadne==0.11.3
+graia-ariadne==0.11.5
graiax-silkcoder
-revChatGPT==4.2.3
+revChatGPT~=6.8.6
toml~=0.10.2
Pillow>=9.3.0
-tinydb~=4.7.1
+tinydb~=4.8.0
loguru~=0.7.0
asyncio~=3.4.3
-pydantic~=1.10.7
+pydantic
markdown~=3.4.3
python-markdown-math~=0.8
-pygments~=2.15.0
+pygments~=2.15.1
imgkit~=1.2.3
qrcode~=7.4.2
-openai~=0.27.4
-EdgeGPT==0.1.25.1
-aiohttp~=3.8.4
-OpenAIAuth~=0.3.6
-urllib3~=1.26.15
+openai~=0.27.8
+EdgeGPT==0.12.1
+aiohttp~=3.8.5
+OpenAIAuth>=2.0.0
+urllib3~=2.0.4
+BingImageCreator~=0.4.2
-requests~=2.28.2
+requests~=2.31.0
uuid~=1.30
-python-telegram-bot==20.2
-aiocqhttp~=1.4.3
+python-telegram-bot==20.4
+aiocqhttp~=1.4.4
tls-client
python-dateutil~=2.8.2
discord.py
azure-cognitiveservices-speech
-poe-api~=0.3.0
+poe-api==0.5.2
-regex~=2023.3.23
-httpx
+regex~=2023.6.3
+httpx~=0.24.1
Quart==0.17.0
edge-tts
+wechatpy~=2.0.0a26
+pydub~=0.25.1
+
+creart~=0.2.2
+tiktoken~=0.4.0
+httpcore~=0.17.3
diff --git a/universal.py b/universal.py
index ab67d0ad..d50688fb 100644
--- a/universal.py
+++ b/universal.py
@@ -3,78 +3,60 @@
from typing import Callable
import httpcore
+import httpx
import openai
from graia.ariadne.message.chain import MessageChain
from graia.ariadne.message.element import Plain
-from httpx import HTTPStatusError, ConnectTimeout
+from httpx import ConnectTimeout
from loguru import logger
from requests.exceptions import SSLError, ProxyError, RequestException
from urllib3.exceptions import MaxRetryError
-from constants import botManager
+from command import CommandHandler
+from constants import botManager, BotPlatform
from constants import config
-from conversation import ConversationHandler, ConversationContext
+from conversation import ConversationHandler
from exceptions import PresetNotFoundException, BotRatelimitException, ConcurrentMessageException, \
- BotTypeNotFoundException, NoAvailableBotException, BotOperationNotSupportedException, CommandRefusedException
-from middlewares.baiducloud import MiddlewareBaiduCloud
-from middlewares.concurrentlock import MiddlewareConcurrentLock
-from middlewares.ratelimit import MiddlewareRatelimit
-from middlewares.timeout import MiddlewareTimeout
-from utils.text_to_speech import get_tts_voice
+ BotTypeNotFoundException, NoAvailableBotException, BotOperationNotSupportedException, CommandRefusedException, \
+ DrawingFailedException
-middlewares = [MiddlewareTimeout(), MiddlewareRatelimit(), MiddlewareBaiduCloud(), MiddlewareConcurrentLock()]
+from utils.text_to_speech import get_tts_voice, VoiceType
+from middlewares.middlewares_loader import load_middlewares
+command_handler = CommandHandler()
-def get_ping_response(conversation_context: ConversationContext):
- return config.response.ping_response.format(current_ai=conversation_context.type,
- supported_ai=botManager.bots_info())
+middlewares = load_middlewares()
async def handle_message(_respond: Callable, session_id: str, message: str,
chain: MessageChain = MessageChain("Unsupported"), is_manager: bool = False,
- nickname: str = '某人'):
- """正常聊天"""
- if not message.strip():
- return config.response.placeholder
-
- for r in config.trigger.ignore_regex:
- if re.match(r, message):
- logger.debug(f"此消息满足正则表达式: {r},忽略……")
- return
-
- # 此处为会话不存在时可以执行的指令
- conversation_handler = await ConversationHandler.get_handler(session_id)
+ nickname: str = '某人', request_from=BotPlatform.AriadneBot):
conversation_context = None
- # 指定前缀对话
- if ' ' in message and (config.trigger.allow_switching_ai or is_manager):
- for ai_type, prefixes in config.trigger.prefix_ai.items():
- for prefix in prefixes:
- if f'{prefix} ' in message:
- conversation_context = await conversation_handler.first_or_create(ai_type)
- message = message.removeprefix(f'{prefix} ')
- break
- else:
- # Continue if the inner loop wasn't broken.
- continue
- # Inner loop was broken, break the outer.
- break
- if not conversation_handler.current_conversation:
- conversation_handler.current_conversation = await conversation_handler.create(
- config.response.default_ai)
def wrap_request(n, m):
+ """
+ Wrapping send messages
+ """
+
async def call(session_id, message, conversation_context, respond):
await m.handle_request(session_id, message, respond, conversation_context, n)
return call
def wrap_respond(n, m):
+ """
+ Wrapping respond messages
+ """
+
async def call(session_id, message, rendered, respond):
await m.handle_respond(session_id, message, rendered, respond, n)
return call
async def respond(msg: str):
+ """
+ Respond method
+ """
if not msg:
return
ret = await _respond(msg)
@@ -89,11 +71,19 @@ async def respond(msg: str):
if not conversation_context:
conversation_context = conversation_handler.current_conversation
+ if not conversation_context:
+ return ret
# TTS Converting
if conversation_context.conversation_voice and isinstance(msg, MessageChain):
+ if request_from in [BotPlatform.Onebot, BotPlatform.AriadneBot]:
+ voice_type = VoiceType.Silk
+ elif request_from == BotPlatform.HttpService:
+ voice_type = VoiceType.Mp3
+ else:
+ voice_type = VoiceType.Wav
tasks = []
for elem in msg:
- task = asyncio.create_task(get_tts_voice(elem, conversation_context))
+ task = asyncio.create_task(get_tts_voice(elem, conversation_context, voice_type))
tasks.append(task)
while tasks:
done, tasks = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
@@ -105,144 +95,95 @@ async def respond(msg: str):
return ret
async def request(_session_id, prompt: str, conversation_context, _respond):
- try:
- task = None
-
- # 不带前缀 - 正常初始化会话
- if bot_type_search := re.search(config.trigger.switch_command, prompt):
- if not (config.trigger.allow_switching_ai or is_manager):
- await respond("不好意思,只有管理员才能切换AI!")
- return
- conversation_handler.current_conversation = (
- await conversation_handler.create(
- bot_type_search[1].strip()
- )
- )
- await respond(f"已切换至 {bot_type_search[1].strip()} AI,现在开始和我聊天吧!")
- return
- # 最终要选择的对话上下文
- if not conversation_context:
- conversation_context = conversation_handler.current_conversation
- # 此处为会话存在后可执行的指令
-
- # 重置会话
- if prompt in config.trigger.reset_command:
- task = conversation_context.reset()
+ """
+ Request method
+ """
- elif prompt in config.trigger.rollback_command:
- task = conversation_context.rollback()
+ task = [None]
- elif prompt in config.trigger.ping_command:
- await respond(get_ping_response(conversation_context))
- return
+ if not conversation_context:
+ conversation_context = conversation_handler.current_conversation
- elif voice_type_search := re.search(config.trigger.switch_voice, prompt):
- if not config.azure.tts_speech_key and config.text_to_speech.engine == "azure":
- await respond("未配置 Azure TTS 账户,无法切换语音!")
- conversation_context.conversation_voice = voice_type_search[1].strip()
- if conversation_context.conversation_voice == '关闭':
- conversation_context.conversation_voice = None
- await respond("已关闭语音,让我们继续聊天吧!")
- elif config.text_to_speech.engine == "vits":
- from utils.vits_tts import vits_api_instance
-
- try:
- if conversation_context.conversation_voice != "None":
- voice_id = conversation_context.conversation_voice
- voice_name = await vits_api_instance.set_id(voice_id)
- else:
- voice_name = await vits_api_instance.set_id(None)
- await respond(f"已切换至 {voice_name} 语音,让我们继续聊天吧!")
- except ValueError:
- await respond("提供的语音ID无效,请输入一个有效的数字ID。")
- except Exception as e:
- await respond(str(e))
- else:
- await respond(f"已切换至 {conversation_context.conversation_voice} 语音,让我们继续聊天吧!")
- return
+ # 命令处理
+ if await command_handler.handle_command(prompt, session_id, conversation_context, conversation_handler, respond,
+ is_manager, task):
+ return
- elif prompt in config.trigger.mixed_only_command:
- conversation_context.switch_renderer("mixed")
- await respond("已切换至图文混合模式,接下来我的回复将会以图文混合的方式呈现!")
- return
+ # 没有任务那就聊天吧!
+ if not task[0]:
+ task[0] = conversation_context.ask(prompt=prompt, chain=chain, name=nickname)
+ async for rendered in task[0]:
+ if rendered:
+ if not str(rendered).strip():
+ logger.warning("检测到内容为空的输出,已忽略")
+ continue
+ action = lambda session_id, prompt, rendered, respond: respond(rendered)
+ for m in middlewares:
+ action = wrap_respond(action, m)
+
+ # 开始处理 handle_response
+ await action(session_id, prompt, rendered, respond)
+ for m in middlewares:
+ await m.handle_respond_completed(session_id, prompt, respond)
- elif prompt in config.trigger.image_only_command:
- conversation_context.switch_renderer("image")
- await respond("已切换至纯图片模式,接下来我的回复将会以图片呈现!")
- return
+ try:
+ if not message.strip():
+ return await respond(config.response.placeholder)
- elif prompt in config.trigger.text_only_command:
- conversation_context.switch_renderer("text")
- await respond("已切换至纯文字模式,接下来我的回复将会以文字呈现(被吞除外)!")
+ for r in config.trigger.ignore_regex:
+ if re.match(r, message):
+ logger.debug(f"此消息满足正则表达式: {r},忽略……")
return
- elif switch_model_search := re.search(config.trigger.switch_model, prompt):
- model_name = switch_model_search[1].strip()
- if model_name in conversation_context.supported_models:
- if not (is_manager or model_name in config.trigger.allowed_models):
- await respond(f"不好意思,只有管理员才能切换到 {model_name} 模型!")
- else:
- await conversation_context.switch_model(model_name)
- await respond(f"已切换至 {model_name} 模型,让我们聊天吧!")
+ # 此处为会话不存在时可以执行的指令
+ conversation_handler = await ConversationHandler.get_handler(session_id)
+ # 指定前缀对话
+ if ' ' in message and (config.trigger.allow_switching_ai or is_manager):
+ for ai_type, prefixes in config.trigger.prefix_ai.items():
+ for prefix in prefixes:
+ if f'{prefix} ' in message:
+ conversation_context = await conversation_handler.first_or_create(ai_type)
+ message = message.removeprefix(f'{prefix} ')
+ break
else:
- await respond(
- f"当前的 AI 不支持切换至 {model_name} 模型,目前仅支持:{conversation_context.supported_models}!")
- return
-
- # 加载预设
- if preset_search := re.search(config.presets.command, prompt):
- logger.trace(f"{session_id} - 正在执行预设: {preset_search[1]}")
- async for _ in conversation_context.reset(): ...
- task = conversation_context.load_preset(preset_search[1])
- elif not conversation_context.preset:
- # 当前没有预设
- logger.trace(f"{session_id} - 未检测到预设,正在执行默认预设……")
- # 隐式加载不回复预设内容
- async for _ in conversation_context.load_preset('default'): ...
-
- # 没有任务那就聊天吧!
- if not task:
- task = conversation_context.ask(prompt=prompt, chain=chain, name=nickname)
- async for rendered in task:
- if rendered:
- if not str(rendered).strip():
- logger.warning("检测到内容为空的输出,已忽略")
- continue
- action = lambda session_id, prompt, rendered, respond: respond(rendered)
- for m in middlewares:
- action = wrap_respond(action, m)
-
- # 开始处理 handle_response
- await action(session_id, prompt, rendered, respond)
- for m in middlewares:
- await m.handle_respond_completed(session_id, prompt, respond)
- except CommandRefusedException as e:
- await respond(str(e))
- except openai.error.InvalidRequestError as e:
- await respond(f"服务器拒绝了您的请求,原因是{str(e)}")
- except BotOperationNotSupportedException:
- await respond("暂不支持此操作,抱歉!")
- except ConcurrentMessageException as e: # Chatbot 账号同时收到多条消息
- await respond(config.response.error_request_concurrent_error)
- except (BotRatelimitException, HTTPStatusError) as e: # Chatbot 账号限流
- await respond(config.response.error_request_too_many.format(exc=e))
- except NoAvailableBotException as e: # 预设不存在
- await respond(f"当前没有可用的{e}账号,不支持使用此 AI!")
- except BotTypeNotFoundException as e: # 预设不存在
- respond_msg = f"AI类型{e}不存在,请检查你的输入是否有问题!目前仅支持:\n"
- respond_msg += botManager.bots_info()
- await respond(respond_msg)
- except PresetNotFoundException: # 预设不存在
- await respond("预设不存在,请检查你的输入是否有问题!")
- except (RequestException, SSLError, ProxyError, MaxRetryError, ConnectTimeout, ConnectTimeout, httpcore.ReadTimeout) as e: # 网络异常
- await respond(config.response.error_network_failure.format(exc=e))
- except Exception as e: # 未处理的异常
- logger.exception(e)
- await respond(config.response.error_format.format(exc=e))
-
- action = request
- for m in middlewares:
- action = wrap_request(action, m)
-
- # 开始处理
- await action(session_id, message.strip(), conversation_context, respond)
+ # Continue if the inner loop wasn't broken.
+ continue
+ # Inner loop was broken, break the outer.
+ break
+ if not conversation_handler.current_conversation:
+ conversation_handler.current_conversation = await conversation_handler.create(
+ config.response.default_ai)
+
+ action = request
+ for m in middlewares:
+ action = wrap_request(action, m)
+
+ # 开始处理
+ await action(session_id, message.strip(), conversation_context, respond)
+ except DrawingFailedException as e:
+ logger.exception(e)
+ await _respond(config.response.error_drawing.format(exc=e.__cause__ or '未知'))
+ except CommandRefusedException as e:
+ await _respond(str(e))
+ except openai.error.InvalidRequestError as e:
+ await _respond(f"服务器拒绝了您的请求,原因是: {str(e)}")
+ except BotOperationNotSupportedException:
+ await _respond("暂不支持此操作,抱歉!")
+ except ConcurrentMessageException as e: # Chatbot 账号同时收到多条消息
+ await _respond(config.response.error_request_concurrent_error)
+ except BotRatelimitException as e: # Chatbot 账号限流
+ await _respond(config.response.error_request_too_many.format(exc=e))
+ except NoAvailableBotException as e: # 预设不存在
+ await _respond(f"当前没有可用的{e}账号,不支持使用此 AI!")
+ except BotTypeNotFoundException as e: # 预设不存在
+ respond_msg = f"AI类型{e}不存在,请检查你的输入是否有问题!目前仅支持:\n"
+ respond_msg += botManager.bots_info()
+ await _respond(respond_msg)
+ except PresetNotFoundException: # 预设不存在
+ await _respond("预设不存在,请检查你的输入是否有问题!")
+    except (RequestException, SSLError, ProxyError, MaxRetryError, ConnectTimeout,
+            httpcore.ReadTimeout, httpx.TimeoutException) as e:  # 网络异常
+ await _respond(config.response.error_network_failure.format(exc=e))
+ except Exception as e: # 未处理的异常
+ logger.exception(e)
+ await _respond(config.response.error_format.format(exc=e))
diff --git a/utils/azure_tts.py b/utils/azure_tts.py
index 8dc3ac23..f82c1edc 100644
--- a/utils/azure_tts.py
+++ b/utils/azure_tts.py
@@ -25,7 +25,7 @@ async def encode_to_silk(a=None):
async def synthesize_speech(text: str, output_file: str,
- voice: str = "en-SG-WayneNeural"): # Singapore English, Wayne
+ voice):
if not config.azure.tts_speech_key:
logger.warning("[Azure TTS] 没有检测到 tts_speech_key,不进行语音转换。")
return False
@@ -36,7 +36,7 @@ def create_synthesizer():
speech_key, service_region = config.azure.tts_speech_key, config.azure.tts_speech_service_region
speech_config = speechsdk.SpeechConfig(subscription=speech_key, region=service_region)
# https://learn.microsoft.com/en-us/azure/cognitive-services/speech-service/language-support?tabs=tts#neural-voices
- speech_config.set_property(speechsdk.PropertyId.SpeechServiceConnection_SynthVoice, voice)
+ speech_config.set_property(speechsdk.PropertyId.SpeechServiceConnection_SynthVoice, voice.full_name)
audio_config = speechsdk.audio.AudioOutputConfig(filename=output_file)
return speechsdk.SpeechSynthesizer(speech_config=speech_config, audio_config=audio_config)
diff --git a/utils/edge_tts.py b/utils/edge_tts.py
index 3222125b..13abbb8f 100644
--- a/utils/edge_tts.py
+++ b/utils/edge_tts.py
@@ -2,22 +2,40 @@
from loguru import logger
import edge_tts
+from constants import config
+from utils.text_to_speech import TtsVoice, TtsVoiceManager
-async def edge_tts_speech(text: str, voice_name: str, path: str):
+edge_tts_voices = {}
+
+
+async def load_edge_tts_voices():
+ if edge_tts_voices:
+ return edge_tts_voices
+ for el in await edge_tts.list_voices():
+ if tts_voice := TtsVoice.parse(
+ "edge", el.get('ShortName', ''), el.get('Gender', None)
+ ):
+ edge_tts_voices[tts_voice.alias] = tts_voice
+ return edge_tts_voices
+
+
+async def edge_tts_speech(text: str, tts_voice: TtsVoice, path: str):
try:
- communicate = edge_tts.Communicate(text, voice_name)
- await communicate.save(f"{path}.mp3")
- return True
+ communicate = edge_tts.Communicate(text, tts_voice.full_name)
+ output_path = path if path.endswith(".mp3") else f"{path}.mp3"
+ await communicate.save(output_path)
+ return output_path
except NoAudioReceived:
raise ValueError("语音生成失败,请检查音色设置是否正确。")
except ValueError as e:
if str(e).startswith("Invalid voice"):
raise ValueError(
- f"不支持的音色:{voice_name}"
- + "\n音色列表:"
- + str([el.get('ShortName', '') for el in await edge_tts.list_voices()])
+ f"不支持的音色:{tts_voice.full_name}"
+ + "\n可用音色列表:"
+ + str([v.alias for v in await TtsVoiceManager.list_tts_voices(
+ "edge", config.text_to_speech.default_voice_prefix)])
)
except Exception as err:
logger.exception(err)
logger.error("[Edge TTS] API error: ", err)
- return False
+ return None
diff --git a/utils/exithooks.py b/utils/exithooks.py
index a2489543..5400d412 100644
--- a/utils/exithooks.py
+++ b/utils/exithooks.py
@@ -1,7 +1,9 @@
import atexit
import sys
+import os
from loguru import logger
-
+import signal
+from constants import config
class ExitHooks(object):
def __init__(self):
@@ -28,16 +30,25 @@ def exc_handler(self, exc_type, exc, *args):
def foo():
if hooks.exit_code is not None or hooks.exception is not None:
- if type(hooks.exception) is KeyboardInterrupt:
+ if isinstance(hooks.exception, (KeyboardInterrupt, type(None))):
return
logger.error("看样子程序似乎没有正常退出。")
logger.exception(hooks.exception)
logger.error("你可以在这里阅读常见问题的解决方案:")
logger.error("https://github.com/lss233/chatgpt-mirai-qq-bot/issues/85")
+ raise hooks.exception
atexit.register(foo)
+
+def exit_gracefully(sig, frame):
+    if config.http:
+        logger.warning("检测到HTTP配置,将强制关闭程序……")
+        os._exit(0)
+    logger.warning("程序即将退出...")
+    sys.exit(0)
+
+
def hook():
- # 仅仅是为了防止 IDE 自动优化掉 import
- pass
\ No newline at end of file
+ signal.signal(signal.SIGINT, exit_gracefully)
diff --git a/utils/network.py b/utils/network.py
index 99735b59..45133a5d 100644
--- a/utils/network.py
+++ b/utils/network.py
@@ -3,14 +3,14 @@
def is_open(ip, port):
"""Check if a host and port is open"""
- s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
- s.settimeout(5)
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ sock.settimeout(5)
try:
# True if open, False if not
- is_open = s.connect_ex((ip, int(port))) == 0
- if is_open:
- s.shutdown(socket.SHUT_RDWR)
+ is_port_open = sock.connect_ex((ip, int(port))) == 0
+ if is_port_open:
+ sock.shutdown(socket.SHUT_RDWR)
except Exception:
- is_open = False
- s.close()
- return is_open
+ is_port_open = False
+ sock.close()
+ return is_port_open
diff --git a/utils/text_to_img.py b/utils/text_to_img.py
index 2f061e1f..e76ca38e 100644
--- a/utils/text_to_img.py
+++ b/utils/text_to_img.py
@@ -10,6 +10,7 @@
from typing import Optional
import aiohttp
+import unicodedata
import asyncio
import imgkit
from pydantic import BaseModel
@@ -20,7 +21,6 @@
import markdown
import qrcode
-import unicodedata
from PIL import Image
from PIL import ImageDraw, ImageFont
from charset_normalizer import from_bytes
@@ -333,16 +333,19 @@ async def text_to_image(text):
with StringIO(html) as input_file:
ok = False
try:
- # 调用imgkit将html转为图片
- ok = await asyncio.get_event_loop().run_in_executor(None, imgkit.from_file, input_file,
- temp_jpg_filename, {
- "enable-local-file-access": "",
- "allow": asset_folder,
- "width": config.text_to_image.width, # 图片宽度
- }, None, None, None, imgkit_config)
- # 调用PIL将图片读取为 JPEG,RGB 格式
- image = Image.open(temp_jpg_filename, formats=['PNG']).convert('RGB')
- ok = True
+ if config.text_to_image.wkhtmltoimage:
+ # 调用imgkit将html转为图片
+ ok = await asyncio.get_event_loop().run_in_executor(None, imgkit.from_file, input_file,
+ temp_jpg_filename, {
+ "enable-local-file-access": "",
+ "allow": asset_folder,
+ "width": config.text_to_image.width, # 图片宽度
+ }, None, None, None, imgkit_config)
+ # 调用PIL将图片读取为 JPEG,RGB 格式
+ image = Image.open(temp_jpg_filename, formats=['PNG']).convert('RGB')
+ ok = True
+ else:
+ ok = False
except Exception as e:
logger.exception(e)
finally:
@@ -357,6 +360,7 @@ async def text_to_image(text):
return image
+
async def to_image(text) -> GraiaImage:
img = await text_to_image(text=str(text))
b = BytesIO()
diff --git a/utils/text_to_speech.py b/utils/text_to_speech.py
index f1960756..b2e67405 100644
--- a/utils/text_to_speech.py
+++ b/utils/text_to_speech.py
@@ -1,4 +1,5 @@
import os
+from enum import Enum
from tempfile import NamedTemporaryFile
from typing import Optional
@@ -8,41 +9,149 @@
from constants import config
from utils.azure_tts import synthesize_speech, encode_to_silk
-from utils.edge_tts import edge_tts_speech
+tts_voice_dic = {}
+"""各引擎的音色列表"""
-async def get_tts_voice(elem, conversation_context) -> Optional[Voice]:
+
+class VoiceType(Enum):
+ Wav = "wav"
+ Mp3 = "mp3"
+ Silk = "silk"
+
+
+class TtsVoice:
+
+ def __init__(self):
+ self.engine = None
+ """参考:edge, azure, vits"""
+ self.gender = None
+ """参考:Male, Female"""
+ self.full_name = None
+ """参考:zh-CN-liaoning-XiaobeiNeural, af-ZA-AdriNeural, am-ET-MekdesNeural"""
+ self.lang = None
+ """参考:zh, af, am"""
+ self.region = None
+ """参考:CN, ZA, ET"""
+ self.name = None
+ """参考:XiaobeiNeural, AdriNeural, MekdesNeural"""
+ self.alias = None
+ """参考:xiaobei, adri, mekdes"""
+ self.sub_region = None
+ """参考:liaoning"""
+
+ def description(self):
+ return f"{self.alias}: {self.full_name}{f' - {self.gender}' if self.gender else ''}"
+
+ @staticmethod
+ def parse(engine, voice: str, gender=None):
+ tts_voice = TtsVoice()
+ tts_voice.engine = engine
+ tts_voice.full_name = voice
+ tts_voice.gender = gender
+ if engine in ["edge", "azure"]:
+ """如:zh-CN-liaoning-XiaobeiNeural、uz-UZ-SardorNeural"""
+ voice_info = voice.split("-")
+ if len(voice_info) < 3:
+ return None
+ lang = voice_info[0]
+ region = voice_info[1]
+ if len(voice_info) == 4:
+ sub_region = voice_info[2]
+ name = voice_info[3]
+ else:
+ sub_region = None
+ name = voice_info[2]
+ alias = name.replace("Neural", "").lower()
+ tts_voice.lang = lang
+ tts_voice.region = region
+ tts_voice.name = name
+ tts_voice.alias = alias
+ tts_voice.sub_region = sub_region
+ else:
+ tts_voice.lang = voice
+ tts_voice.alias = voice
+
+ return tts_voice
+
+
+class TtsVoiceManager:
+ """tts音色管理"""
+
+ @staticmethod
+ def parse_tts_voice(tts_engine, voice_name) -> TtsVoice:
+ if tts_engine != "edge":
+ # todo support other engines
+ return TtsVoice.parse(tts_engine, voice_name)
+ from utils.edge_tts import edge_tts_voices
+ if "edge" not in tts_voice_dic:
+ tts_voice_dic["edge"] = edge_tts_voices
+ _voice_dic = tts_voice_dic["edge"]
+ if _voice := TtsVoice.parse(tts_engine, voice_name):
+ return _voice_dic.get(_voice.alias, None)
+ if voice_name in _voice_dic:
+ return _voice_dic[voice_name]
+
+ @staticmethod
+ async def list_tts_voices(tts_engine, voice_prefix):
+ """获取可用哪些音色"""
+
+ def match_voice_prefix(full_name):
+ if isinstance(voice_prefix, str):
+ return full_name.startswith(voice_prefix)
+ if isinstance(voice_prefix, list):
+ for _prefix in voice_prefix:
+ if full_name.startswith(_prefix):
+ return True
+ return False
+
+ if tts_engine == "edge":
+ from utils.edge_tts import load_edge_tts_voices
+ if "edge" not in tts_voice_dic:
+ tts_voice_dic["edge"] = await load_edge_tts_voices()
+ _voice_dic = tts_voice_dic["edge"]
+ return [v for v in _voice_dic.values() if voice_prefix is None or match_voice_prefix(v.full_name)]
+ # todo support other engines
+ return []
+
+
+async def get_tts_voice(elem, conversation_context, voice_type=VoiceType.Wav) -> Optional[Voice]:
if not isinstance(elem, Plain) or not str(elem):
return None
- output_file = NamedTemporaryFile(mode='w+b', suffix='.wav', delete=False)
+ voice_suffix = f".{voice_type.value}"
+
+ output_file = NamedTemporaryFile(mode='w+b', suffix=voice_suffix, delete=False)
output_file.close()
- logger.debug(f"[TextToSpeech] 开始转换语音 - {output_file.name} - {conversation_context.session_id}")
+ logger.debug(f"[TextToSpeech] 开始转换语音 - {conversation_context.session_id}")
if config.text_to_speech.engine == "vits":
from utils.vits_tts import vits_api_instance
- if config.mirai or config.onebot:
- output_file.name = output_file.name.split(".")[0] + ".silk"
- if await vits_api_instance.process_message(str(elem), output_file.name):
+ if await vits_api_instance.process_message(str(elem), output_file.name, voice_type.value):
logger.debug(f"[TextToSpeech] 语音转换完成 - {output_file.name} - {conversation_context.session_id}")
return Voice(path=output_file.name)
elif config.text_to_speech.engine == "azure":
+ tts_output_file_name = (f"{output_file.name}.{VoiceType.Wav.value}"
+ if voice_type == VoiceType.Silk else output_file.name)
if await synthesize_speech(
str(elem),
- output_file.name,
+ tts_output_file_name,
conversation_context.conversation_voice
):
- voice = Voice(path=output_file.name)
- if config.mirai or config.onebot:
+ voice = Voice(path=tts_output_file_name)
+ if voice_type == VoiceType.Silk:
voice = Voice(data_bytes=await encode_to_silk(await voice.get_bytes()))
logger.debug(f"[TextToSpeech] 语音转换完成 - {output_file.name} - {conversation_context.session_id}")
return voice
elif config.text_to_speech.engine == "edge":
- if await edge_tts_speech(str(elem), conversation_context.conversation_voice, output_file.name):
- output_file.name = f"{output_file.name}.mp3"
+ from utils.edge_tts import edge_tts_speech
+ tts_output_file_name = await edge_tts_speech(
+ str(elem), conversation_context.conversation_voice, output_file.name)
+ if tts_output_file_name:
+ output_file.name = tts_output_file_name
voice = Voice(path=output_file.name)
- if config.mirai or config.onebot:
+ if voice_type == VoiceType.Silk:
voice = Voice(data_bytes=await encode_to_silk(await voice.get_bytes()))
logger.debug(f"[TextToSpeech] 语音转换完成 - {output_file.name} - {conversation_context.session_id}")
return voice
diff --git a/utils/vits_tts.py b/utils/vits_tts.py
index 7bb99d67..539de54e 100644
--- a/utils/vits_tts.py
+++ b/utils/vits_tts.py
@@ -1,7 +1,7 @@
import regex as re
from loguru import logger
from constants import config
-from aiohttp import ClientSession, ClientTimeout
+from aiohttp import ClientSession, ClientTimeout, FormData
__all__ = ['VitsAPI']
@@ -21,14 +21,15 @@ async def initialize(self, new_id=None):
self.initialized = True
def check_id_exists(self, json_list, given_id):
- for item in json_list:
- for key, value in item.items():
- if str(given_id) in [key, value]:
- return key, value
+ if json_list["status"] == "success":
+ id = json_list["id"]
+ if str(given_id) == str(id):
+ name = json_list["name"]
+ return id, name
return None, None
async def set_id(self, new_id):
- json_array = await self.get_json_array()
+ json_array = await self.get_json_data(new_id)
id_found, voice_name = self.check_id_exists(json_array, new_id)
if not voice_name:
@@ -37,6 +38,20 @@ async def set_id(self, new_id):
self.id = id_found
return voice_name
+ async def get_json_data(self, new_id):
+ url = f"{config.vits.api_url}/check"
+
+ try:
+ async with ClientSession(timeout=ClientTimeout(total=config.vits.timeout)) as session:
+ form_data = FormData()
+ form_data.add_field("model", "vits")
+ form_data.add_field("id", new_id)
+ async with session.post(url=url, data=form_data) as res:
+ return await res.json()
+ except Exception as e:
+ logger.error(f"获取语音音色列表失败: {str(e)}")
+ raise Exception("获取语音音色列表失败,请检查网络连接和API设置")
+
async def get_json_array(self):
url = f"{config.vits.api_url}/speakers"
@@ -51,8 +66,6 @@ async def get_json_array(self):
raise Exception("获取语音音色列表失败,请检查网络连接和API设置")
async def voice_speakers_check(self, new_id=None):
- json_array = await self.get_json_array()
-
try:
if new_id is not None:
integer_number = int(new_id)
@@ -60,7 +73,9 @@ async def voice_speakers_check(self, new_id=None):
integer_number = int(config.text_to_speech.default)
else:
raise ValueError("默认语音音色未设置,请检查配置文件")
- voice_name = self.check_id_exists(json_array, integer_number)
+
+ json_data = await self.get_json_data(integer_number)
+ _, voice_name = self.check_id_exists(json_data, integer_number)
except ValueError:
logger.error("vits引擎中音色只能为纯数字")
return None
@@ -70,8 +85,8 @@ async def voice_speakers_check(self, new_id=None):
return integer_number
- async def get_voice_data(self, text, lang, format):
- url = f"{config.vits.api_url}?text={text}&lang={lang}&id={self.id}&format={format}&length={config.vits.speed}"
+ async def get_voice_data(self, text, lang, voice_type):
+ url = f"{config.vits.api_url}?text={text}&lang={lang}&id={self.id}&format={voice_type}&length={config.vits.speed}"
async with ClientSession(timeout=ClientTimeout(total=config.vits.timeout)) as session:
try:
@@ -129,26 +144,17 @@ def linguistic_process(self, text):
else ''.join(matches)
)
- async def response(self, text, format, path):
+ async def response(self, text, voice_type, path):
text = self.linguistic_process(text)
- content = await self.get_voice_data(text, self.lang, format)
+ content = await self.get_voice_data(text, self.lang, voice_type)
if content is not None:
return self.save_voice_file(content, path)
- async def process_message(self, message, path):
+ async def process_message(self, message, path, voice_type):
if not self.initialized:
await self.initialize()
- return (
- await self.response(message, "silk", path)
- if config.mirai or config.onebot
- else await self.response(message, "wav", path)
- )
+ return await self.response(message, voice_type, path)
vits_api_instance = VitsAPI()
-
-
-async def vits_api(message: str, path: str):
- await vits_api_instance.initialize()
- return await vits_api_instance.process_message(message, path)