diff --git a/.env.template b/.env.template index 25addf2b3e5..82f44216ab8 100644 --- a/.env.template +++ b/.env.template @@ -66,4 +66,4 @@ ANTHROPIC_API_VERSION= ANTHROPIC_URL= ### (optional) -WHITE_WEBDEV_ENDPOINTS= \ No newline at end of file +WHITE_WEBDAV_ENDPOINTS= \ No newline at end of file diff --git a/.eslintrc.json b/.eslintrc.json index d229e86f250..5b5e88e67aa 100644 --- a/.eslintrc.json +++ b/.eslintrc.json @@ -1,4 +1,7 @@ { "extends": "next/core-web-vitals", - "plugins": ["prettier"] + "plugins": ["prettier", "unused-imports"], + "rules": { + "unused-imports/no-unused-imports": "warn" + } } diff --git a/.github/workflows/deploy_preview.yml b/.github/workflows/deploy_preview.yml index bdbb78c27c5..b988452433b 100644 --- a/.github/workflows/deploy_preview.yml +++ b/.github/workflows/deploy_preview.yml @@ -3,9 +3,7 @@ name: VercelPreviewDeployment on: pull_request_target: types: - - opened - - synchronize - - reopened + - review_requested env: VERCEL_TEAM: ${{ secrets.VERCEL_TEAM }} @@ -49,7 +47,7 @@ jobs: run: npm install --global vercel@latest - name: Cache dependencies - uses: actions/cache@v2 + uses: actions/cache@v4 id: cache-npm with: path: ~/.npm diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 00000000000..faf7205d9cb --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,39 @@ +name: Run Tests + +on: + push: + branches: + - main + tags: + - "!*" + pull_request: + types: + - review_requested + +jobs: + test: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v4 + + - name: Set up Node.js + uses: actions/setup-node@v3 + with: + node-version: 18 + cache: "yarn" + + - name: Cache node_modules + uses: actions/cache@v4 + with: + path: node_modules + key: ${{ runner.os }}-node_modules-${{ hashFiles('**/yarn.lock') }} + restore-keys: | + ${{ runner.os }}-node_modules- + + - name: Install dependencies + run: yarn install + + - name: Run Jest tests + run: yarn test:ci diff --git a/README.md b/README.md index c8b158956b3..b9e994e5055 100644 --- a/README.md +++ b/README.md @@ -12,15 +12,18 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 一键免费部署你的跨平台私人 ChatGPT 应用, 支持 GPT3, GPT4 & Gemini Pro 模型。 +[![Saas][Saas-image]][saas-url] [![Web][Web-image]][web-url] [![Windows][Windows-image]][download-url] [![MacOS][MacOS-image]][download-url] [![Linux][Linux-image]][download-url] -[Web App](https://app.nextchat.dev/) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [Web App](https://app.nextchat.dev) / [Desktop App](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [Discord](https://discord.gg/YCkeafCafC) / [Enterprise Edition](#enterprise-edition) / [Twitter](https://twitter.com/NextChatDev) -[网页版](https://app.nextchat.dev/) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[NextChatAI](https://nextchat.dev/chat) / [网页版](https://app.nextchat.dev) / [客户端](https://github.com/Yidadaa/ChatGPT-Next-Web/releases) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [反馈](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) +[saas-url]: https://nextchat.dev/chat?utm_source=readme +[saas-image]: https://img.shields.io/badge/NextChat-Saas-green?logo=microsoftedge 
[web-url]: https://app.nextchat.dev/ [download-url]: https://github.com/Yidadaa/ChatGPT-Next-Web/releases [Web-image]: https://img.shields.io/badge/Web-PWA-orange?logo=microsoftedge @@ -28,7 +31,7 @@ One-Click to get a well-designed cross-platform ChatGPT web UI, with GPT3, GPT4 [MacOS-image]: https://img.shields.io/badge/-MacOS-black?logo=apple [Linux-image]: https://img.shields.io/badge/-Linux-333?logo=ubuntu -[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) +[Deploy on Vercel](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) [Deploy with BT Panel](https://www.bt.cn/new/download.html) [](https://monica.im/?utm=nxcrp) @@ -60,7 +63,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev** 企业版咨询: **business@nextchat.dev** - + ## Features @@ -97,6 +100,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev** ## What's New +- 🚀 v2.15.4 The app now supports using Tauri to fetch LLM APIs directly from the client, for better security! [#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) - 🚀 v2.15.0 Now supports Plugins! Read this: [NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 Now supports Artifacts & SD - 🚀 v2.10.1 support Google Gemini Pro model. @@ -134,6 +138,7 @@ For enterprise inquiries, please contact: **business@nextchat.dev** ## 最新动态 +- 🚀 v2.15.4 客户端支持Tauri本地直接调用大模型API,更安全![#5379](https://github.com/ChatGPTNextWeb/ChatGPT-Next-Web/issues/5379) - 🚀 v2.15.0 现在支持插件功能了!了解更多:[NextChat-Awesome-Plugins](https://github.com/ChatGPTNextWeb/NextChat-Awesome-Plugins) - 🚀 v2.14.0 现在支持 Artifacts & SD 了。 - 🚀 v2.10.1 现在支持 Gemini Pro 模型。 @@ -172,7 +177,7 @@ We recommend that you follow the steps below to re-deploy: ### Enable Automatic Updates -> If you encounter a failure of Upstream Sync execution, please manually sync fork once. +> If you encounter a failure of Upstream Sync execution, please [manually update code](./README.md#manually-updating-code). After forking the project, due to the limitations imposed by GitHub, you need to manually enable Workflows and Upstream Sync Action on the Actions page of the forked project. Once enabled, automatic updates will be scheduled every hour: @@ -329,9 +334,9 @@ To control custom models, use `+` to add a custom model, use `-` to hide a model Use `-all` to disable all default models, `+all` to enable all default models. -For Azure: use `modelName@azure=deploymentName` to customize model name and deployment name. -> Example: `+gpt-3.5-turbo@azure=gpt35` will show option `gpt35(Azure)` in model list. -> If you only can use Azure model, `-all,+gpt-3.5-turbo@azure=gpt35` will `gpt35(Azure)` the only option in model list. +For Azure: use `modelName@Azure=deploymentName` to customize the model name and deployment name. +> Example: `+gpt-3.5-turbo@Azure=gpt35` will show the option `gpt35(Azure)` in the model list. +> If you can only use Azure models, `-all,+gpt-3.5-turbo@Azure=gpt35` will make `gpt35(Azure)` the only option in the model list. 
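Editor's note: the custom-model grammar in `CUSTOM_MODELS` (a `+`/`-` prefix plus an optional `@Provider=deploymentName` suffix) applies to every provider section here, so a minimal TypeScript sketch of how such a spec string can be parsed may help. This is an illustration only — `parseCustomModels` and `ModelRule` are hypothetical names, not the repo's actual implementation (which lives in `app/utils/model.ts`):

```ts
// Hypothetical parser for a CUSTOM_MODELS spec such as
// "-all,+gpt-3.5-turbo@Azure=gpt35,+Doubao-lite-4k@bytedance=ep-xxxxx-xxx".
interface ModelRule {
  enabled: boolean; // "+" enables a model, "-" hides it
  name: string; // model name, or the special value "all"
  provider?: string; // text after "@", e.g. "Azure" or "bytedance"
  displayName?: string; // deployment/display name after "="
}

function parseCustomModels(spec: string): ModelRule[] {
  return spec
    .split(",")
    .map((item) => item.trim())
    .filter(Boolean)
    .map((item) => {
      const enabled = !item.startsWith("-");
      const body = item.replace(/^[+-]/, "");
      const [nameAndProvider, displayName] = body.split("=");
      const [name, provider] = nameAndProvider.split("@");
      return { enabled, name, provider, displayName };
    });
}

// parseCustomModels("-all,+gpt-3.5-turbo@Azure=gpt35") yields one rule hiding
// all default models and one enabling gpt-3.5-turbo on Azure shown as "gpt35".
```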
For ByteDance: use `modelName@bytedance=deploymentName` to customize model name and deployment name. > Example: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` will show option `Doubao-lite-4k(ByteDance)` in model list. @@ -340,7 +345,7 @@ For ByteDance: use `modelName@bytedance=deploymentName` to customize model name Change default model -### `WHITE_WEBDEV_ENDPOINTS` (optional) +### `WHITE_WEBDAV_ENDPOINTS` (optional) You can use this option if you want to increase the number of webdav service addresses you are allowed to access, as required by the format: - Each address must be a complete endpoint diff --git a/README_CN.md b/README_CN.md index beed396c5aa..3f339ea61f6 100644 --- a/README_CN.md +++ b/README_CN.md @@ -8,7 +8,7 @@ 一键免费部署你的私人 ChatGPT 网页应用,支持 GPT3, GPT4 & Gemini Pro 模型。 -[企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) /[演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企业版](#%E4%BC%81%E4%B8%9A%E7%89%88) / [演示 Demo](https://chat-gpt-next-web.vercel.app/) / [反馈 Issues](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [加入 Discord](https://discord.gg/zrhvHCr79N) [Deploy on Zeabur](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Deploy on Zeabur](https://zeabur.com/templates/ZBUEFA) [Open in Gitpod](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 打开自动更新 -> 如果你遇到了 Upstream Sync 执行错误,请手动 Sync Fork 一次! +> 如果你遇到了 Upstream Sync 执行错误,请[手动 Sync Fork 一次](./README_CN.md#手动更新代码)! 当你 fork 项目之后,由于 Github 的限制,需要手动去你 fork 后的项目的 Actions 页面启用 Workflows,并启用 Upstream Sync Action,启用之后即可开启每小时定时自动更新: @@ -202,7 +202,7 @@ ByteDance Api Url. 如果你想禁用从链接解析预制设置,将此环境变量设置为 1 即可。 -### `WHITE_WEBDEV_ENDPOINTS` (可选) +### `WHITE_WEBDAV_ENDPOINTS` (可选) 如果你想增加允许访问的webdav服务地址,可以使用该选项,格式要求: - 每一个地址必须是一个完整的 endpoint @@ -216,9 +216,9 @@ ByteDance Api Url. 
用来控制模型列表,使用 `+` 增加一个模型,使用 `-` 来隐藏一个模型,使用 `模型名=展示名` 来自定义模型的展示名,用英文逗号隔开。 -在Azure的模式下,支持使用`modelName@azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) -> 示例:`+gpt-3.5-turbo@azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 -> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` +在Azure的模式下,支持使用`modelName@Azure=deploymentName`的方式配置模型名称和部署名称(deploy-name) +> 示例:`+gpt-3.5-turbo@Azure=gpt35`这个配置会在模型列表显示一个`gpt35(Azure)`的选项。 +> 如果你只能使用Azure模式,那么设置 `-all,+gpt-3.5-turbo@Azure=gpt35` 则可以让对话的默认使用 `gpt35(Azure)` 在ByteDance的模式下,支持使用`modelName@bytedance=deploymentName`的方式配置模型名称和部署名称(deploy-name) > 示例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx`这个配置会在模型列表显示一个`Doubao-lite-4k(ByteDance)`的选项 diff --git a/README_JA.md b/README_JA.md index 6b8caadae6c..062c112629d 100644 --- a/README_JA.md +++ b/README_JA.md @@ -5,7 +5,7 @@ ワンクリックで無料であなた専用の ChatGPT ウェブアプリをデプロイ。GPT3、GPT4 & Gemini Pro モデルをサポート。 -[企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) +[NextChatAI](https://nextchat.dev/chat?utm_source=readme) / [企業版](#企業版) / [デモ](https://chat-gpt-next-web.vercel.app/) / [フィードバック](https://github.com/Yidadaa/ChatGPT-Next-Web/issues) / [Discordに参加](https://discord.gg/zrhvHCr79N) [Zeaburでデプロイ](https://vercel.com/new/clone?repository-url=https%3A%2F%2Fgithub.com%2FChatGPTNextWeb%2FChatGPT-Next-Web&env=OPENAI_API_KEY&env=CODE&project-name=nextchat&repository-name=NextChat) [Zeaburでデプロイ](https://zeabur.com/templates/ZBUEFA) [Gitpodで開く](https://gitpod.io/#https://github.com/Yidadaa/ChatGPT-Next-Web) @@ -54,7 +54,7 @@ ### 自動更新を開く -> Upstream Sync の実行エラーが発生した場合は、手動で Sync Fork してください! +> Upstream Sync の実行エラーが発生した場合は、[手動で Sync Fork](./README_JA.md#手動でコードを更新する) してください! 
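Editor's note: both the Chinese section above and the Japanese section below describe the renamed `WHITE_WEBDAV_ENDPOINTS` variable, a comma-separated whitelist of complete WebDAV endpoints. A rough TypeScript sketch of how such a whitelist check can be enforced — simplified and hypothetical; the repo's real check lives in `app/api/webdav/[...path]/route.ts`:

```ts
// Hypothetical, simplified whitelist check for WHITE_WEBDAV_ENDPOINTS.
// Each entry must be a complete endpoint, e.g. "https://dav.example.com/dav/".
const allowedWebDavEndpoints = (process.env.WHITE_WEBDAV_ENDPOINTS ?? "")
  .split(",")
  .filter((endpoint) => Boolean(endpoint.trim()));

function normalizeUrl(url: string): URL | null {
  try {
    return new URL(url);
  } catch {
    return null; // malformed URLs are rejected outright
  }
}

function isEndpointAllowed(endpoint: string): boolean {
  const target = normalizeUrl(endpoint);
  if (!target) return false;
  return allowedWebDavEndpoints.some((allowed) => {
    const base = normalizeUrl(allowed);
    return (
      base !== null &&
      base.protocol === target.protocol &&
      base.hostname === target.hostname &&
      target.pathname.startsWith(base.pathname)
    );
  });
}
```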
プロジェクトを fork した後、GitHub の制限により、fork 後のプロジェクトの Actions ページで Workflows を手動で有効にし、Upstream Sync Action を有効にする必要があります。有効化後、毎時の定期自動更新が可能になります: @@ -193,7 +193,7 @@ ByteDance API の URL。 リンクからのプリセット設定解析を無効にしたい場合は、この環境変数を 1 に設定します。 -### `WHITE_WEBDEV_ENDPOINTS` (オプション) +### `WHITE_WEBDAV_ENDPOINTS` (オプション) アクセス許可を与える WebDAV サービスのアドレスを追加したい場合、このオプションを使用します。フォーマット要件: - 各アドレスは完全なエンドポイントでなければなりません。 @@ -207,8 +207,8 @@ ByteDance API の URL。 モデルリストを管理します。`+` でモデルを追加し、`-` でモデルを非表示にし、`モデル名=表示名` でモデルの表示名をカスタマイズし、カンマで区切ります。 -Azure モードでは、`modelName@azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 -> 例:`+gpt-3.5-turbo@azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。 +Azure モードでは、`modelName@Azure=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 +> 例:`+gpt-3.5-turbo@Azure=gpt35` この設定でモデルリストに `gpt35(Azure)` のオプションが表示されます。 ByteDance モードでは、`modelName@bytedance=deploymentName` 形式でモデル名とデプロイ名(deploy-name)を設定できます。 > 例: `+Doubao-lite-4k@bytedance=ep-xxxxx-xxx` この設定でモデルリストに `Doubao-lite-4k(ByteDance)` のオプションが表示されます。 diff --git a/app/api/[provider]/[...path]/route.ts b/app/api/[provider]/[...path]/route.ts index 24aa5ec040f..5ac248d0c87 100644 --- a/app/api/[provider]/[...path]/route.ts +++ b/app/api/[provider]/[...path]/route.ts @@ -1,5 +1,5 @@ import { ApiPath } from "@/app/constant"; -import { NextRequest, NextResponse } from "next/server"; +import { NextRequest } from "next/server"; import { handle as openaiHandler } from "../../openai"; import { handle as azureHandler } from "../../azure"; import { handle as googleHandler } from "../../google"; @@ -10,6 +10,7 @@ import { handle as alibabaHandler } from "../../alibaba"; import { handle as moonshotHandler } from "../../moonshot"; import { handle as stabilityHandler } from "../../stability"; import { handle as iflytekHandler } from "../../iflytek"; +import { handle as xaiHandler } from "../../xai"; import { handle as proxyHandler } from "../../proxy"; async function handle( @@ -38,6 +39,8 @@ async function handle( return stabilityHandler(req, { params }); case ApiPath.Iflytek: return iflytekHandler(req, { params }); + case ApiPath.XAI: + return xaiHandler(req, { params }); case ApiPath.OpenAI: return openaiHandler(req, { params }); default: diff --git a/app/api/alibaba.ts b/app/api/alibaba.ts index 675d9f301aa..894b1ae4c04 100644 --- a/app/api/alibaba.ts +++ b/app/api/alibaba.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Alibaba, ALIBABA_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/anthropic.ts b/app/api/anthropic.ts index d7b070247b7..7a44443710f 100644 --- a/app/api/anthropic.ts +++ b/app/api/anthropic.ts @@ -3,7 +3,6 @@ import { ANTHROPIC_BASE_URL, Anthropic, ApiPath, - DEFAULT_MODELS, ServiceProvider, ModelProvider, } from "@/app/constant"; diff --git a/app/api/auth.ts b/app/api/auth.ts index 95965ceec2d..d4ac66a113b 100644 --- a/app/api/auth.ts +++ b/app/api/auth.ts @@ -92,6 +92,9 @@ export function auth(req: NextRequest, modelProvider: ModelProvider) { systemApiKey = serverConfig.iflytekApiKey + ":" + serverConfig.iflytekApiSecret; break; + case ModelProvider.XAI: + systemApiKey = serverConfig.xaiApiKey; + break; case ModelProvider.GPT: default: if 
(req.nextUrl.pathname.includes("azure/deployments")) { diff --git a/app/api/azure.ts b/app/api/azure.ts index e2cb0c7e66b..39d872e8cf8 100644 --- a/app/api/azure.ts +++ b/app/api/azure.ts @@ -1,4 +1,3 @@ -import { getServerSideConfig } from "@/app/config/server"; import { ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; diff --git a/app/api/baidu.ts b/app/api/baidu.ts index f4315d186da..0408b43c5bc 100644 --- a/app/api/baidu.ts +++ b/app/api/baidu.ts @@ -3,7 +3,6 @@ import { BAIDU_BASE_URL, ApiPath, ModelProvider, - BAIDU_OATUH_URL, ServiceProvider, } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; diff --git a/app/api/common.ts b/app/api/common.ts index 25decbf620e..b4c792d6ff0 100644 --- a/app/api/common.ts +++ b/app/api/common.ts @@ -1,11 +1,6 @@ import { NextRequest, NextResponse } from "next/server"; import { getServerSideConfig } from "../config/server"; -import { - DEFAULT_MODELS, - OPENAI_BASE_URL, - GEMINI_BASE_URL, - ServiceProvider, -} from "../constant"; +import { OPENAI_BASE_URL, ServiceProvider } from "../constant"; import { isModelAvailableInServer } from "../utils/model"; import { cloudflareAIGatewayUrl } from "../utils/cloudflare"; diff --git a/app/api/google.ts b/app/api/google.ts index 98fe469bfb7..707892c33d0 100644 --- a/app/api/google.ts +++ b/app/api/google.ts @@ -1,12 +1,7 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "./auth"; import { getServerSideConfig } from "@/app/config/server"; -import { - ApiPath, - GEMINI_BASE_URL, - Google, - ModelProvider, -} from "@/app/constant"; +import { ApiPath, GEMINI_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; const serverConfig = getServerSideConfig(); @@ -28,7 +23,8 @@ export async function handle( }); } - const bearToken = req.headers.get("Authorization") ?? ""; + const bearToken = + req.headers.get("x-goog-api-key") || req.headers.get("Authorization") || ""; const token = bearToken.trim().replaceAll("Bearer ", "").trim(); const apiKey = token ? token : serverConfig.googleApiKey; @@ -96,8 +92,8 @@ async function request(req: NextRequest, apiKey: string) { }, 10 * 60 * 1000, ); - const fetchUrl = `${baseUrl}${path}?key=${apiKey}${ - req?.nextUrl?.searchParams?.get("alt") === "sse" ? "&alt=sse" : "" + const fetchUrl = `${baseUrl}${path}${ + req?.nextUrl?.searchParams?.get("alt") === "sse" ? "?alt=sse" : "" }`; console.log("[Fetch Url] ", fetchUrl); @@ -105,6 +101,9 @@ async function request(req: NextRequest, apiKey: string) { headers: { "Content-Type": "application/json", "Cache-Control": "no-store", + "x-goog-api-key": + req.headers.get("x-goog-api-key") || + (req.headers.get("Authorization") ?? 
"").replace("Bearer ", ""), }, method: req.method, body: req.body, diff --git a/app/api/iflytek.ts b/app/api/iflytek.ts index eabdd9f4ce6..8b8227dce1f 100644 --- a/app/api/iflytek.ts +++ b/app/api/iflytek.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Iflytek, IFLYTEK_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; // iflytek const serverConfig = getServerSideConfig(); diff --git a/app/api/moonshot.ts b/app/api/moonshot.ts index 247dd618321..5bf4807e3e6 100644 --- a/app/api/moonshot.ts +++ b/app/api/moonshot.ts @@ -1,6 +1,5 @@ import { getServerSideConfig } from "@/app/config/server"; import { - Moonshot, MOONSHOT_BASE_URL, ApiPath, ModelProvider, @@ -10,7 +9,6 @@ import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; import { isModelAvailableInServer } from "@/app/utils/model"; -import type { RequestPayload } from "@/app/client/platforms/openai"; const serverConfig = getServerSideConfig(); diff --git a/app/api/openai.ts b/app/api/openai.ts index 7dfd84e1785..bbba69e569c 100644 --- a/app/api/openai.ts +++ b/app/api/openai.ts @@ -6,7 +6,7 @@ import { NextRequest, NextResponse } from "next/server"; import { auth } from "./auth"; import { requestOpenai } from "./common"; -const ALLOWD_PATH = new Set(Object.values(OpenaiPath)); +const ALLOWED_PATH = new Set(Object.values(OpenaiPath)); function getModels(remoteModelRes: OpenAIListModelResponse) { const config = getServerSideConfig(); @@ -34,7 +34,7 @@ export async function handle( const subpath = params.path.join("/"); - if (!ALLOWD_PATH.has(subpath)) { + if (!ALLOWED_PATH.has(subpath)) { console.log("[OpenAI Route] forbidden path ", subpath); return NextResponse.json( { diff --git a/app/api/tencent/route.ts b/app/api/tencent/route.ts index 885909e7a75..fc4f8c79edf 100644 --- a/app/api/tencent/route.ts +++ b/app/api/tencent/route.ts @@ -1,15 +1,8 @@ import { getServerSideConfig } from "@/app/config/server"; -import { - TENCENT_BASE_URL, - ApiPath, - ModelProvider, - ServiceProvider, - Tencent, -} from "@/app/constant"; +import { TENCENT_BASE_URL, ModelProvider } from "@/app/constant"; import { prettyObject } from "@/app/utils/format"; import { NextRequest, NextResponse } from "next/server"; import { auth } from "@/app/api/auth"; -import { isModelAvailableInServer } from "@/app/utils/model"; import { getHeader } from "@/app/utils/tencent"; const serverConfig = getServerSideConfig(); diff --git a/app/api/webdav/[...path]/route.ts b/app/api/webdav/[...path]/route.ts index 9f96cbfcf74..bb7743bda40 100644 --- a/app/api/webdav/[...path]/route.ts +++ b/app/api/webdav/[...path]/route.ts @@ -6,7 +6,7 @@ const config = getServerSideConfig(); const mergedAllowedWebDavEndpoints = [ ...internalAllowedWebDavEndpoints, - ...config.allowedWebDevEndpoints, + ...config.allowedWebDavEndpoints, ].filter((domain) => Boolean(domain.trim())); const normalizeUrl = (url: string) => { diff --git a/app/api/xai.ts b/app/api/xai.ts new file mode 100644 index 00000000000..a4ee8b39731 --- /dev/null +++ b/app/api/xai.ts @@ -0,0 +1,128 @@ +import { getServerSideConfig } from "@/app/config/server"; +import { + XAI_BASE_URL, + ApiPath, + ModelProvider, + ServiceProvider, 
+} from "@/app/constant"; +import { prettyObject } from "@/app/utils/format"; +import { NextRequest, NextResponse } from "next/server"; +import { auth } from "@/app/api/auth"; +import { isModelAvailableInServer } from "@/app/utils/model"; + +const serverConfig = getServerSideConfig(); + +export async function handle( + req: NextRequest, + { params }: { params: { path: string[] } }, +) { + console.log("[XAI Route] params ", params); + + if (req.method === "OPTIONS") { + return NextResponse.json({ body: "OK" }, { status: 200 }); + } + + const authResult = auth(req, ModelProvider.XAI); + if (authResult.error) { + return NextResponse.json(authResult, { + status: 401, + }); + } + + try { + const response = await request(req); + return response; + } catch (e) { + console.error("[XAI] ", e); + return NextResponse.json(prettyObject(e)); + } +} + +async function request(req: NextRequest) { + const controller = new AbortController(); + + // strip the local API path prefix; the upstream base URL is prepended below + let path = `${req.nextUrl.pathname}`.replaceAll(ApiPath.XAI, ""); + + let baseUrl = serverConfig.xaiUrl || XAI_BASE_URL; + + if (!baseUrl.startsWith("http")) { + baseUrl = `https://${baseUrl}`; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, -1); + } + + console.log("[Proxy] ", path); + console.log("[Base Url]", baseUrl); + + const timeoutId = setTimeout( + () => { + controller.abort(); + }, + 10 * 60 * 1000, + ); + + const fetchUrl = `${baseUrl}${path}`; + const fetchOptions: RequestInit = { + headers: { + "Content-Type": "application/json", + Authorization: req.headers.get("Authorization") ?? "", + }, + method: req.method, + body: req.body, + redirect: "manual", + // @ts-ignore + duplex: "half", + signal: controller.signal, + }; + + // #1815 try to refuse requests for models blocked by the server config + if (serverConfig.customModels && req.body) { + try { + const clonedBody = await req.text(); + fetchOptions.body = clonedBody; + + const jsonBody = JSON.parse(clonedBody) as { model?: string }; + + // isModelAvailableInServer returns true when the model is disallowed, so reject with 403 + if ( + isModelAvailableInServer( + serverConfig.customModels, + jsonBody?.model as string, + ServiceProvider.XAI as string, + ) + ) { + return NextResponse.json( + { + error: true, + message: `you are not allowed to use ${jsonBody?.model} model`, + }, + { + status: 403, + }, + ); + } + } catch (e) { + console.error(`[XAI] filter`, e); + } + } + try { + const res = await fetch(fetchUrl, fetchOptions); + + // to prevent browser prompt for credentials + const newHeaders = new Headers(res.headers); + newHeaders.delete("www-authenticate"); + // to disable nginx buffering + newHeaders.set("X-Accel-Buffering", "no"); + + return new Response(res.body, { + status: res.status, + statusText: res.statusText, + headers: newHeaders, + }); + } finally { + clearTimeout(timeoutId); + } +} diff --git a/app/client/api.ts b/app/client/api.ts index cecc453baa2..4238c2a264b 100644 --- a/app/client/api.ts +++ b/app/client/api.ts @@ -1,7 +1,6 @@ import { getClientConfig } from "../config/client"; import { ACCESS_CODE_PREFIX, - Azure, ModelProvider, ServiceProvider, } from "../constant"; @@ -21,11 +20,13 @@ import { QwenApi } from "./platforms/alibaba"; import { HunyuanApi } from "./platforms/tencent"; import { MoonshotApi } from "./platforms/moonshot"; import { SparkApi } from "./platforms/iflytek"; +import { XAIApi } from "./platforms/xai"; export const ROLES = ["system", "user", "assistant"] as const; export type MessageRole = (typeof ROLES)[number]; export const Models = ["gpt-3.5-turbo", "gpt-4"] as const; 
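Reviewer aside before the client-side hunk continues: the new `app/api/xai.ts` route above repeats the proxy shape every provider route here uses — strip the local API prefix, forward to the upstream base URL, and cancel through an `AbortController` timer. A condensed sketch of that recurring pattern; `proxyToUpstream` is a made-up name for illustration, not a repo function:

```ts
// Condensed sketch of the provider-proxy pattern in routes like app/api/xai.ts:
// rewrite the path, forward the request, abort on timeout, clean up headers.
async function proxyToUpstream(
  req: Request,
  localPrefix: string, // e.g. "/api/xai"
  baseUrl: string, // e.g. "https://api.x.ai"
  timeoutMs = 10 * 60 * 1000,
): Promise<Response> {
  const path = new URL(req.url).pathname.replaceAll(localPrefix, "");
  const controller = new AbortController();
  const timeoutId = setTimeout(() => controller.abort(), timeoutMs);
  try {
    const res = await fetch(`${baseUrl}${path}`, {
      method: req.method,
      headers: {
        "Content-Type": "application/json",
        Authorization: req.headers.get("Authorization") ?? "",
      },
      body: req.body,
      // @ts-ignore -- Node 18+ requires "duplex" when streaming a request body
      duplex: "half",
      signal: controller.signal,
    });
    const headers = new Headers(res.headers);
    headers.delete("www-authenticate"); // avoid a browser credentials prompt
    headers.set("X-Accel-Buffering", "no"); // keep nginx from buffering SSE
    return new Response(res.body, { status: res.status, headers });
  } finally {
    clearTimeout(timeoutId);
  }
}
```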
+export const TTSModels = ["tts-1", "tts-1-hd"] as const; export type ChatModel = ModelType; export interface MultimodalContent { @@ -54,6 +55,15 @@ export interface LLMConfig { style?: DalleRequestPayload["style"]; } +export interface SpeechOptions { + model: string; + input: string; + voice: string; + response_format?: string; + speed?: number; + onController?: (controller: AbortController) => void; +} + export interface ChatOptions { messages: RequestMessage[]; config: LLMConfig; @@ -88,6 +98,7 @@ export interface LLMModelProvider { export abstract class LLMApi { abstract chat(options: ChatOptions): Promise; + abstract speech(options: SpeechOptions): Promise; abstract usage(): Promise; abstract models(): Promise; } @@ -142,6 +153,9 @@ export class ClientApi { case ModelProvider.Iflytek: this.llm = new SparkApi(); break; + case ModelProvider.XAI: + this.llm = new XAIApi(); + break; default: this.llm = new ChatGPTApi(); } @@ -206,19 +220,22 @@ export function validString(x: string): boolean { return x?.length > 0; } -export function getHeaders() { +export function getHeaders(ignoreHeaders: boolean = false) { const accessStore = useAccessStore.getState(); const chatStore = useChatStore.getState(); - const headers: Record = { - "Content-Type": "application/json", - Accept: "application/json", - }; + let headers: Record = {}; + if (!ignoreHeaders) { + headers = { + "Content-Type": "application/json", + Accept: "application/json", + }; + } const clientConfig = getClientConfig(); function getConfig() { const modelConfig = chatStore.currentSession().mask.modelConfig; - const isGoogle = modelConfig.providerName == ServiceProvider.Google; + const isGoogle = modelConfig.providerName === ServiceProvider.Google; const isAzure = modelConfig.providerName === ServiceProvider.Azure; const isAnthropic = modelConfig.providerName === ServiceProvider.Anthropic; const isBaidu = modelConfig.providerName == ServiceProvider.Baidu; @@ -226,6 +243,7 @@ export function getHeaders() { const isAlibaba = modelConfig.providerName === ServiceProvider.Alibaba; const isMoonshot = modelConfig.providerName === ServiceProvider.Moonshot; const isIflytek = modelConfig.providerName === ServiceProvider.Iflytek; + const isXAI = modelConfig.providerName === ServiceProvider.XAI; const isEnabledAccessControl = accessStore.enabledAccessControl(); const apiKey = isGoogle ? accessStore.googleApiKey @@ -239,6 +257,8 @@ export function getHeaders() { ? accessStore.alibabaApiKey : isMoonshot ? accessStore.moonshotApiKey + : isXAI + ? accessStore.xaiApiKey : isIflytek ? accessStore.iflytekApiKey && accessStore.iflytekApiSecret ? accessStore.iflytekApiKey + ":" + accessStore.iflytekApiSecret @@ -253,13 +273,20 @@ export function getHeaders() { isAlibaba, isMoonshot, isIflytek, + isXAI, apiKey, isEnabledAccessControl, }; } function getAuthHeader(): string { - return isAzure ? "api-key" : isAnthropic ? "x-api-key" : "Authorization"; + return isAzure + ? "api-key" + : isAnthropic + ? "x-api-key" + : isGoogle + ? 
"x-goog-api-key" + : "Authorization"; } const { @@ -270,14 +297,15 @@ export function getHeaders() { apiKey, isEnabledAccessControl, } = getConfig(); - // when using google api in app, not set auth header - if (isGoogle && clientConfig?.isApp) return headers; // when using baidu api in app, not set auth header if (isBaidu && clientConfig?.isApp) return headers; const authHeader = getAuthHeader(); - const bearerToken = getBearerToken(apiKey, isAzure || isAnthropic); + const bearerToken = getBearerToken( + apiKey, + isAzure || isAnthropic || isGoogle, + ); if (bearerToken) { headers[authHeader] = bearerToken; @@ -308,6 +336,8 @@ export function getClientApi(provider: ServiceProvider): ClientApi { return new ClientApi(ModelProvider.Moonshot); case ServiceProvider.Iflytek: return new ClientApi(ModelProvider.Iflytek); + case ServiceProvider.XAI: + return new ClientApi(ModelProvider.XAI); default: return new ClientApi(ModelProvider.GPT); } diff --git a/app/client/platforms/alibaba.ts b/app/client/platforms/alibaba.ts index d5fa3042fc1..86229a14705 100644 --- a/app/client/platforms/alibaba.ts +++ b/app/client/platforms/alibaba.ts @@ -12,6 +12,7 @@ import { getHeaders, LLMApi, LLMModel, + SpeechOptions, MultimodalContent, } from "../api"; import Locale from "../../locales"; @@ -22,6 +23,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -83,6 +85,10 @@ export class QwenApi implements LLMApi { return res?.output?.choices?.at(0)?.message?.content ?? ""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, @@ -173,6 +179,7 @@ export class QwenApi implements LLMApi { controller.signal.onabort = finish; fetchEventSource(chatPath, { + fetch: fetch as any, ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/anthropic.ts b/app/client/platforms/anthropic.ts index 7dd39c9cddc..3645cbe6eac 100644 --- a/app/client/platforms/anthropic.ts +++ b/app/client/platforms/anthropic.ts @@ -1,5 +1,5 @@ -import { ACCESS_CODE_PREFIX, Anthropic, ApiPath } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, MultimodalContent } from "../api"; +import { Anthropic, ApiPath } from "@/app/constant"; +import { ChatOptions, getHeaders, LLMApi, SpeechOptions } from "../api"; import { useAccessStore, useAppConfig, @@ -8,18 +8,12 @@ import { ChatMessageTool, } from "@/app/store"; import { getClientConfig } from "@/app/config/client"; -import { DEFAULT_API_HOST } from "@/app/constant"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; - -import Locale from "../../locales"; -import { prettyObject } from "@/app/utils/format"; +import { ANTHROPIC_BASE_URL } from "@/app/constant"; import { getMessageTextContent, isVisionModel } from "@/app/utils"; import { preProcessImageContent, stream } from "@/app/utils/chat"; import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; export type MultiBlockContent = { type: "image" | "text"; @@ -80,6 +74,10 @@ const ClaudeMapper = { const keys = ["claude-2, claude-instant-1"]; export class ClaudeApi implements LLMApi { + speech(options: 
SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + extractMessage(res: any) { console.log("[Response] claude response: ", res); @@ -391,9 +389,7 @@ export class ClaudeApi implements LLMApi { if (baseUrl.trim().length === 0) { const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? DEFAULT_API_HOST + "/api/proxy/anthropic" - : ApiPath.Anthropic; + baseUrl = isApp ? ANTHROPIC_BASE_URL : ApiPath.Anthropic; } if (!baseUrl.startsWith("http") && !baseUrl.startsWith("/api")) { diff --git a/app/client/platforms/baidu.ts b/app/client/platforms/baidu.ts index 3be147f4985..2511a696b9b 100644 --- a/app/client/platforms/baidu.ts +++ b/app/client/platforms/baidu.ts @@ -14,6 +14,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -23,6 +24,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -75,6 +77,10 @@ export class ErnieApi implements LLMApi { return [baseUrl, path].join("/"); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ // "error_code": 336006, "error_msg": "the role of message with even index in the messages must be user or function", @@ -192,6 +198,7 @@ export class ErnieApi implements LLMApi { controller.signal.onabort = finish; fetchEventSource(chatPath, { + fetch: fetch as any, ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/bytedance.ts b/app/client/platforms/bytedance.ts index 7677cafe12b..000a9e278db 100644 --- a/app/client/platforms/bytedance.ts +++ b/app/client/platforms/bytedance.ts @@ -13,6 +13,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -22,6 +23,7 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -77,6 +79,10 @@ export class DoubaoApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages = options.messages.map((v) => ({ role: v.role, @@ -160,6 +166,7 @@ export class DoubaoApi implements LLMApi { controller.signal.onabort = finish; fetchEventSource(chatPath, { + fetch: fetch as any, ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/google.ts b/app/client/platforms/google.ts index 12d8846357a..a4b594ddfed 100644 --- a/app/client/platforms/google.ts +++ b/app/client/platforms/google.ts @@ -1,20 +1,32 @@ import { ApiPath, Google, REQUEST_TIMEOUT_MS } from "@/app/constant"; -import { ChatOptions, getHeaders, LLMApi, LLMModel, LLMUsage } from "../api"; -import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { getClientConfig } from "@/app/config/client"; -import { DEFAULT_API_HOST } from "@/app/constant"; -import Locale from "../../locales"; import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + LLMUsage, + SpeechOptions, +} from "../api"; +import { + useAccessStore, + useAppConfig, + useChatStore, + usePluginStore, + ChatMessageTool, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { getClientConfig } from "@/app/config/client"; +import { GEMINI_BASE_URL } from "@/app/constant"; + import { getMessageTextContent, getMessageImages, isVisionModel, } from "@/app/utils"; import { preProcessImageContent } from "@/app/utils/chat"; +import { nanoid } from "nanoid"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; export class GeminiProApi implements LLMApi { path(path: string): string { @@ -27,7 +39,7 @@ export class GeminiProApi implements LLMApi { const isApp = !!getClientConfig()?.isApp; if (baseUrl.length === 0) { - baseUrl = isApp ? DEFAULT_API_HOST + `/api/proxy/google` : ApiPath.Google; + baseUrl = isApp ? GEMINI_BASE_URL : ApiPath.Google; } if (baseUrl.endsWith("/")) { baseUrl = baseUrl.slice(0, baseUrl.length - 1); @@ -41,10 +53,6 @@ export class GeminiProApi implements LLMApi { let chatPath = [baseUrl, path].join("/"); chatPath += chatPath.includes("?") ? 
"&alt=sse" : "?alt=sse"; - // if chatPath.startsWith('http') then add key in query string - if (chatPath.startsWith("http") && accessStore.googleApiKey) { - chatPath += `&key=${accessStore.googleApiKey}`; - } return chatPath; } extractMessage(res: any) { @@ -56,6 +64,10 @@ export class GeminiProApi implements LLMApi { "" ); } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions): Promise { const apiClient = this; let multimodal = false; @@ -170,114 +182,84 @@ export class GeminiProApi implements LLMApi { ); if (shouldStream) { - let responseText = ""; - let remainText = ""; - let finished = false; - - const finish = () => { - if (!finished) { - finished = true; - options.onFinish(responseText + remainText); - } - }; - - // animate response to make it looks smooth - function animateResponseText() { - if (finished || controller.signal.aborted) { - responseText += remainText; - finish(); - return; - } - - if (remainText.length > 0) { - const fetchCount = Math.max(1, Math.round(remainText.length / 60)); - const fetchText = remainText.slice(0, fetchCount); - responseText += fetchText; - remainText = remainText.slice(fetchCount); - options.onUpdate?.(responseText, fetchText); - } - - requestAnimationFrame(animateResponseText); - } - - // start animaion - animateResponseText(); - - controller.signal.onabort = finish; - - fetchEventSource(chatPath, { - ...chatPayload, - async onopen(res) { - clearTimeout(requestTimeoutId); - const contentType = res.headers.get("content-type"); - console.log( - "[Gemini] request response content type: ", - contentType, - ); - - if (contentType?.startsWith("text/plain")) { - responseText = await res.clone().text(); - return finish(); - } - - if ( - !res.ok || - !res.headers - .get("content-type") - ?.startsWith(EventStreamContentType) || - res.status !== 200 - ) { - const responseTexts = [responseText]; - let extraInfo = await res.clone().text(); - try { - const resJson = await res.clone().json(); - extraInfo = prettyObject(resJson); - } catch {} - - if (res.status === 401) { - responseTexts.push(Locale.Error.Unauthorized); - } - - if (extraInfo) { - responseTexts.push(extraInfo); - } - - responseText = responseTexts.join("\n\n"); - - return finish(); - } - }, - onmessage(msg) { - if (msg.data === "[DONE]" || finished) { - return finish(); - } - const text = msg.data; - try { - const json = JSON.parse(text); - const delta = apiClient.extractMessage(json); - - if (delta) { - remainText += delta; - } + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + // @ts-ignore + tools.length > 0 + ? 
// @ts-ignore + [{ functionDeclarations: tools.map((tool) => tool.function) }] + : [], + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const chunkJson = JSON.parse(text); - const blockReason = json?.promptFeedback?.blockReason; - if (blockReason) { - // being blocked - console.log(`[Google] [Safety Ratings] result:`, blockReason); - } - } catch (e) { - console.error("[Request] parse error", text, msg); + const functionCall = chunkJson?.candidates + ?.at(0) + ?.content.parts.at(0)?.functionCall; + if (functionCall) { + const { name, args } = functionCall; + runTools.push({ + id: nanoid(), + type: "function", + function: { + name, + arguments: JSON.stringify(args), // utils.chat call function, using JSON.parse + }, + }); } + return chunkJson?.candidates?.at(0)?.content.parts.at(0)?.text; }, - onclose() { - finish(); - }, - onerror(e) { - options.onError?.(e); - throw e; + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.contents?.splice( + // @ts-ignore + requestPayload?.contents?.length, + 0, + { + role: "model", + parts: toolCallMessage.tool_calls.map( + (tool: ChatMessageTool) => ({ + functionCall: { + name: tool?.function?.name, + args: JSON.parse(tool?.function?.arguments as string), + }, + }), + ), + }, + // @ts-ignore + ...toolCallResult.map((result) => ({ + role: "function", + parts: [ + { + functionResponse: { + name: result.name, + response: { + name: result.name, + content: result.content, // TODO just text content... + }, + }, + }, + ], + })), + ); }, - openWhenHidden: true, - }); + options, + ); } else { const res = await fetch(chatPath, chatPayload); clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/iflytek.ts b/app/client/platforms/iflytek.ts index 73cea5ba0e7..55a39d0ccca 100644 --- a/app/client/platforms/iflytek.ts +++ b/app/client/platforms/iflytek.ts @@ -1,13 +1,19 @@ "use client"; import { ApiPath, - DEFAULT_API_HOST, + IFLYTEK_BASE_URL, Iflytek, REQUEST_TIMEOUT_MS, } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; -import { ChatOptions, getHeaders, LLMApi, LLMModel } from "../api"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; import Locale from "../../locales"; import { EventStreamContentType, @@ -16,8 +22,9 @@ import { import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; -import { OpenAIListModelResponse, RequestPayload } from "./openai"; +import { RequestPayload } from "./openai"; export class SparkApi implements LLMApi { private disableListModels = true; @@ -34,7 +41,7 @@ export class SparkApi implements LLMApi { if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; const apiPath = ApiPath.Iflytek; - baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + baseUrl = isApp ? IFLYTEK_BASE_URL : apiPath; } if (baseUrl.endsWith("/")) { @@ -53,6 +60,10 @@ export class SparkApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? 
""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { @@ -139,6 +150,7 @@ export class SparkApi implements LLMApi { controller.signal.onabort = finish; fetchEventSource(chatPath, { + fetch: fetch as any, ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/moonshot.ts b/app/client/platforms/moonshot.ts index cd10d2f6c15..22a34b2e22f 100644 --- a/app/client/platforms/moonshot.ts +++ b/app/client/platforms/moonshot.ts @@ -2,11 +2,9 @@ // azure and openai, using same models. so using same LLMApi. import { ApiPath, - DEFAULT_API_HOST, - DEFAULT_MODELS, + MOONSHOT_BASE_URL, Moonshot, REQUEST_TIMEOUT_MS, - ServiceProvider, } from "@/app/constant"; import { useAccessStore, @@ -15,28 +13,18 @@ import { ChatMessageTool, usePluginStore, } from "@/app/store"; -import { collectModelsWithDefaultModel } from "@/app/utils/model"; -import { preProcessImageContent, stream } from "@/app/utils/chat"; -import { cloudflareAIGatewayUrl } from "@/app/utils/cloudflare"; - +import { stream } from "@/app/utils/chat"; import { ChatOptions, getHeaders, LLMApi, LLMModel, - LLMUsage, - MultimodalContent, + SpeechOptions, } from "../api"; -import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent } from "@/app/utils"; - -import { OpenAIListModelResponse, RequestPayload } from "./openai"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; export class MoonshotApi implements LLMApi { private disableListModels = true; @@ -53,7 +41,7 @@ export class MoonshotApi implements LLMApi { if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; const apiPath = ApiPath.Moonshot; - baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + baseUrl = isApp ? MOONSHOT_BASE_URL : apiPath; } if (baseUrl.endsWith("/")) { @@ -72,6 +60,10 @@ export class MoonshotApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? ""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const messages: ChatOptions["messages"] = []; for (const v of options.messages) { diff --git a/app/client/platforms/openai.ts b/app/client/platforms/openai.ts index 664ff872ba3..30f7415c141 100644 --- a/app/client/platforms/openai.ts +++ b/app/client/platforms/openai.ts @@ -2,7 +2,7 @@ // azure and openai, using same models. so using same LLMApi. 
import { ApiPath, - DEFAULT_API_HOST, + OPENAI_BASE_URL, DEFAULT_MODELS, OpenaiPath, Azure, @@ -33,20 +33,16 @@ import { LLMModel, LLMUsage, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; -import { - EventStreamContentType, - fetchEventSource, -} from "@fortaine/fetch-event-source"; -import { prettyObject } from "@/app/utils/format"; import { getClientConfig } from "@/app/config/client"; import { getMessageTextContent, - getMessageImages, isVisionModel, isDalle3 as _isDalle3, } from "@/app/utils"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -103,7 +99,7 @@ export class ChatGPTApi implements LLMApi { if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; const apiPath = isAzure ? ApiPath.Azure : ApiPath.OpenAI; - baseUrl = isApp ? DEFAULT_API_HOST + "/proxy" + apiPath : apiPath; + baseUrl = isApp ? OPENAI_BASE_URL : apiPath; } if (baseUrl.endsWith("/")) { @@ -147,6 +143,44 @@ export class ChatGPTApi implements LLMApi { return res.choices?.at(0)?.message?.content ?? res; } + async speech(options: SpeechOptions): Promise { + const requestPayload = { + model: options.model, + input: options.input, + voice: options.voice, + response_format: options.response_format, + speed: options.speed, + }; + + console.log("[Request] openai speech payload: ", requestPayload); + + const controller = new AbortController(); + options.onController?.(controller); + + try { + const speechPath = this.path(OpenaiPath.SpeechPath); + const speechPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + const res = await fetch(speechPath, speechPayload); + clearTimeout(requestTimeoutId); + return await res.arrayBuffer(); + } catch (e) { + console.log("[Request] failed to make a speech request", e); + throw e; + } + } + async chat(options: ChatOptions) { const modelConfig = { ...useAppConfig.getState().modelConfig, @@ -244,6 +278,7 @@ export class ChatGPTApi implements LLMApi { ); } if (shouldStream) { + let index = -1; const [tools, funcs] = usePluginStore .getState() .getAsTools( @@ -269,10 +304,10 @@ export class ChatGPTApi implements LLMApi { }>; const tool_calls = choices[0]?.delta?.tool_calls; if (tool_calls?.length > 0) { - const index = tool_calls[0]?.index; const id = tool_calls[0]?.id; const args = tool_calls[0]?.function?.arguments; if (id) { + index += 1; runTools.push({ id, type: tool_calls[0]?.type, @@ -294,6 +329,8 @@ export class ChatGPTApi implements LLMApi { toolCallMessage: any, toolCallResult: any[], ) => { + // reset index value + index = -1; // @ts-ignore requestPayload?.messages?.splice( // @ts-ignore @@ -316,7 +353,7 @@ export class ChatGPTApi implements LLMApi { // make a fetch request const requestTimeoutId = setTimeout( () => controller.abort(), - isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 2 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. + isDalle3 || isO1 ? REQUEST_TIMEOUT_MS * 4 : REQUEST_TIMEOUT_MS, // dalle3 using b64_json is slow. 
); const res = await fetch(chatPath, chatPayload); diff --git a/app/client/platforms/tencent.ts b/app/client/platforms/tencent.ts index 579008a9b9d..3610fac0a48 100644 --- a/app/client/platforms/tencent.ts +++ b/app/client/platforms/tencent.ts @@ -1,5 +1,5 @@ "use client"; -import { ApiPath, DEFAULT_API_HOST, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { ApiPath, TENCENT_BASE_URL, REQUEST_TIMEOUT_MS } from "@/app/constant"; import { useAccessStore, useAppConfig, useChatStore } from "@/app/store"; import { @@ -8,6 +8,7 @@ import { LLMApi, LLMModel, MultimodalContent, + SpeechOptions, } from "../api"; import Locale from "../../locales"; import { @@ -21,6 +22,7 @@ import mapKeys from "lodash-es/mapKeys"; import mapValues from "lodash-es/mapValues"; import isArray from "lodash-es/isArray"; import isObject from "lodash-es/isObject"; +import { fetch } from "@/app/utils/stream"; export interface OpenAIListModelResponse { object: string; @@ -69,9 +71,7 @@ export class HunyuanApi implements LLMApi { if (baseUrl.length === 0) { const isApp = !!getClientConfig()?.isApp; - baseUrl = isApp - ? DEFAULT_API_HOST + "/api/proxy/tencent" - : ApiPath.Tencent; + baseUrl = isApp ? TENCENT_BASE_URL : ApiPath.Tencent; } if (baseUrl.endsWith("/")) { @@ -89,6 +89,10 @@ export class HunyuanApi implements LLMApi { return res.Choices?.at(0)?.Message?.Content ?? ""; } + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + async chat(options: ChatOptions) { const visionModel = isVisionModel(options.config.model); const messages = options.messages.map((v, index) => ({ @@ -174,6 +178,7 @@ controller.signal.onabort = finish; fetchEventSource(chatPath, { + fetch: fetch as any, ...chatPayload, async onopen(res) { clearTimeout(requestTimeoutId); diff --git a/app/client/platforms/xai.ts b/app/client/platforms/xai.ts new file mode 100644 index 00000000000..deb74e66c03 --- /dev/null +++ b/app/client/platforms/xai.ts @@ -0,0 +1,193 @@ +"use client"; +// xai exposes an OpenAI-compatible API, so this client mirrors the OpenAI LLMApi implementation. +import { ApiPath, XAI_BASE_URL, XAI, REQUEST_TIMEOUT_MS } from "@/app/constant"; +import { + useAccessStore, + useAppConfig, + useChatStore, + ChatMessageTool, + usePluginStore, +} from "@/app/store"; +import { stream } from "@/app/utils/chat"; +import { + ChatOptions, + getHeaders, + LLMApi, + LLMModel, + SpeechOptions, +} from "../api"; +import { getClientConfig } from "@/app/config/client"; +import { getMessageTextContent } from "@/app/utils"; +import { RequestPayload } from "./openai"; +import { fetch } from "@/app/utils/stream"; + +export class XAIApi implements LLMApi { + private disableListModels = true; + + path(path: string): string { + const accessStore = useAccessStore.getState(); + + let baseUrl = ""; + + if (accessStore.useCustomConfig) { + baseUrl = accessStore.xaiUrl; + } + + if (baseUrl.length === 0) { + const isApp = !!getClientConfig()?.isApp; + const apiPath = ApiPath.XAI; + baseUrl = isApp ? XAI_BASE_URL : apiPath; + } + + if (baseUrl.endsWith("/")) { + baseUrl = baseUrl.slice(0, baseUrl.length - 1); + } + if (!baseUrl.startsWith("http") && !baseUrl.startsWith(ApiPath.XAI)) { + baseUrl = "https://" + baseUrl; + } + + console.log("[Proxy Endpoint] ", baseUrl, path); + + return [baseUrl, path].join("/"); + } + + extractMessage(res: any) { + return res.choices?.at(0)?.message?.content ?? 
""; + } + + speech(options: SpeechOptions): Promise { + throw new Error("Method not implemented."); + } + + async chat(options: ChatOptions) { + const messages: ChatOptions["messages"] = []; + for (const v of options.messages) { + const content = getMessageTextContent(v); + messages.push({ role: v.role, content }); + } + + const modelConfig = { + ...useAppConfig.getState().modelConfig, + ...useChatStore.getState().currentSession().mask.modelConfig, + ...{ + model: options.config.model, + providerName: options.config.providerName, + }, + }; + + const requestPayload: RequestPayload = { + messages, + stream: options.config.stream, + model: modelConfig.model, + temperature: modelConfig.temperature, + presence_penalty: modelConfig.presence_penalty, + frequency_penalty: modelConfig.frequency_penalty, + top_p: modelConfig.top_p, + }; + + console.log("[Request] xai payload: ", requestPayload); + + const shouldStream = !!options.config.stream; + const controller = new AbortController(); + options.onController?.(controller); + + try { + const chatPath = this.path(XAI.ChatPath); + const chatPayload = { + method: "POST", + body: JSON.stringify(requestPayload), + signal: controller.signal, + headers: getHeaders(), + }; + + // make a fetch request + const requestTimeoutId = setTimeout( + () => controller.abort(), + REQUEST_TIMEOUT_MS, + ); + + if (shouldStream) { + const [tools, funcs] = usePluginStore + .getState() + .getAsTools( + useChatStore.getState().currentSession().mask?.plugin || [], + ); + return stream( + chatPath, + requestPayload, + getHeaders(), + tools as any, + funcs, + controller, + // parseSSE + (text: string, runTools: ChatMessageTool[]) => { + // console.log("parseSSE", text, runTools); + const json = JSON.parse(text); + const choices = json.choices as Array<{ + delta: { + content: string; + tool_calls: ChatMessageTool[]; + }; + }>; + const tool_calls = choices[0]?.delta?.tool_calls; + if (tool_calls?.length > 0) { + const index = tool_calls[0]?.index; + const id = tool_calls[0]?.id; + const args = tool_calls[0]?.function?.arguments; + if (id) { + runTools.push({ + id, + type: tool_calls[0]?.type, + function: { + name: tool_calls[0]?.function?.name as string, + arguments: args, + }, + }); + } else { + // @ts-ignore + runTools[index]["function"]["arguments"] += args; + } + } + return choices[0]?.delta?.content; + }, + // processToolMessage, include tool_calls message and tool call results + ( + requestPayload: RequestPayload, + toolCallMessage: any, + toolCallResult: any[], + ) => { + // @ts-ignore + requestPayload?.messages?.splice( + // @ts-ignore + requestPayload?.messages?.length, + 0, + toolCallMessage, + ...toolCallResult, + ); + }, + options, + ); + } else { + const res = await fetch(chatPath, chatPayload); + clearTimeout(requestTimeoutId); + + const resJson = await res.json(); + const message = this.extractMessage(resJson); + options.onFinish(message); + } + } catch (e) { + console.log("[Request] failed to make a chat request", e); + options.onError?.(e as Error); + } + } + async usage() { + return { + used: 0, + total: 0, + }; + } + + async models(): Promise { + return []; + } +} diff --git a/app/command.ts b/app/command.ts index bea4e06f381..aec73ef53d6 100644 --- a/app/command.ts +++ b/app/command.ts @@ -38,6 +38,7 @@ interface ChatCommands { next?: Command; prev?: Command; clear?: Command; + fork?: Command; del?: Command; } diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx index d725ee6596e..ce187fbcb2c 100644 --- a/app/components/artifacts.tsx +++ 
b/app/components/artifacts.tsx @@ -7,7 +7,6 @@ import { useImperativeHandle, } from "react"; import { useParams } from "react-router"; -import { useWindowSize } from "@/app/utils"; import { IconButton } from "./button"; import { nanoid } from "nanoid"; import ExportIcon from "../icons/share.svg"; diff --git a/app/components/auth.module.scss b/app/components/auth.module.scss index 6630c0613c7..fe143b4289b 100644 --- a/app/components/auth.module.scss +++ b/app/components/auth.module.scss @@ -1,12 +1,70 @@ .auth-page { display: flex; - justify-content: center; + justify-content: flex-start; align-items: center; height: 100%; width: 100%; flex-direction: column; + .top-banner { + position: relative; + width: 100%; + display: flex; + justify-content: center; + align-items: center; + padding: 12px 64px; + box-sizing: border-box; + background: var(--second); + .top-banner-inner { + display: flex; + justify-content: center; + align-items: center; + font-size: 14px; + line-height: 150%; + span { + gap: 8px; + a { + display: inline-flex; + align-items: center; + text-decoration: none; + margin-left: 8px; + color: var(--primary); + } + } + } + .top-banner-close { + cursor: pointer; + position: absolute; + top: 50%; + right: 48px; + transform: translateY(-50%); + } + } + + @media (max-width: 600px) { + .top-banner { + padding: 12px 24px 12px 12px; + .top-banner-close { + right: 10px; + } + .top-banner-inner { + .top-banner-logo { + margin-right: 8px; + } + } + } + } + + .auth-header { + display: flex; + justify-content: space-between; + width: 100%; + padding: 10px; + box-sizing: border-box; + animation: slide-in-from-top ease 0.3s; + } .auth-logo { + margin-top: 10vh; transform: scale(1.4); } @@ -14,6 +72,7 @@ font-size: 24px; font-weight: bold; line-height: 2; + margin-bottom: 1vh; } .auth-tips { @@ -24,6 +83,10 @@ margin: 3vh 0; } + .auth-input-second { + margin: 0 0 3vh 0; + } + .auth-actions { display: flex; justify-content: center; diff --git a/app/components/auth.tsx b/app/components/auth.tsx index 57118349bac..539a52eecc9 100644 --- a/app/components/auth.tsx +++ b/app/components/auth.tsx @@ -1,21 +1,35 @@ import styles from "./auth.module.scss"; import { IconButton } from "./button"; - +import { useState, useEffect } from "react"; import { useNavigate } from "react-router-dom"; -import { Path } from "../constant"; +import { Path, SAAS_CHAT_URL } from "../constant"; import { useAccessStore } from "../store"; import Locale from "../locales"; - +import Delete from "../icons/close.svg"; +import Arrow from "../icons/arrow.svg"; +import Logo from "../icons/logo.svg"; +import { useMobileScreen } from "@/app/utils"; import BotIcon from "../icons/bot.svg"; -import { useEffect } from "react"; import { getClientConfig } from "../config/client"; +import { PasswordInput } from "./ui-lib"; +import LeftIcon from "@/app/icons/left.svg"; +import { safeLocalStorage } from "@/app/utils"; +import { + trackSettingsPageGuideToCPaymentClick, + trackAuthorizationPageButtonToCPaymentClick, +} from "../utils/auth-settings-events"; +const storage = safeLocalStorage(); export function AuthPage() { const navigate = useNavigate(); const accessStore = useAccessStore(); - const goHome = () => navigate(Path.Home); const goChat = () => navigate(Path.Chat); + const goSaas = () => { + trackAuthorizationPageButtonToCPaymentClick(); + window.location.href = SAAS_CHAT_URL; + }; + const resetAccessCode = () => { accessStore.update((access) => { access.openaiApiKey = ""; @@ -32,6 +46,14 @@ export function AuthPage() { return (
diff --git a/app/components/artifacts.tsx b/app/components/artifacts.tsx
index d725ee6596e..ce187fbcb2c 100644
--- a/app/components/artifacts.tsx
+++ b/app/components/artifacts.tsx
@@ -7,7 +7,6 @@ import {
   useImperativeHandle,
 } from "react";
 import { useParams } from "react-router";
-import { useWindowSize } from "@/app/utils";
 import { IconButton } from "./button";
 import { nanoid } from "nanoid";
 import ExportIcon from "../icons/share.svg";
diff --git a/app/components/auth.module.scss b/app/components/auth.module.scss
index 6630c0613c7..fe143b4289b 100644
--- a/app/components/auth.module.scss
+++ b/app/components/auth.module.scss
@@ -1,12 +1,70 @@
 .auth-page {
   display: flex;
-  justify-content: center;
+  justify-content: flex-start;
   align-items: center;
   height: 100%;
   width: 100%;
   flex-direction: column;
+  .top-banner {
+    position: relative;
+    width: 100%;
+    display: flex;
+    justify-content: center;
+    align-items: center;
+    padding: 12px 64px;
+    box-sizing: border-box;
+    background: var(--second);
+    .top-banner-inner {
+      display: flex;
+      justify-content: center;
+      align-items: center;
+      font-size: 14px;
+      line-height: 150%;
+      span {
+        gap: 8px;
+        a {
+          display: inline-flex;
+          align-items: center;
+          text-decoration: none;
+          margin-left: 8px;
+          color: var(--primary);
+        }
+      }
+    }
+    .top-banner-close {
+      cursor: pointer;
+      position: absolute;
+      top: 50%;
+      right: 48px;
+      transform: translateY(-50%);
+    }
+  }
+
+  @media (max-width: 600px) {
+    .top-banner {
+      padding: 12px 24px 12px 12px;
+      .top-banner-close {
+        right: 10px;
+      }
+      .top-banner-inner {
+        .top-banner-logo {
+          margin-right: 8px;
+        }
+      }
+    }
+  }
+
+  .auth-header {
+    display: flex;
+    justify-content: space-between;
+    width: 100%;
+    padding: 10px;
+    box-sizing: border-box;
+    animation: slide-in-from-top ease 0.3s;
+  }

   .auth-logo {
+    margin-top: 10vh;
     transform: scale(1.4);
   }

@@ -14,6 +72,7 @@
   font-size: 24px;
   font-weight: bold;
   line-height: 2;
+  margin-bottom: 1vh;
 }

 .auth-tips {
@@ -24,6 +83,10 @@
   margin: 3vh 0;
 }

+.auth-input-second {
+  margin: 0 0 3vh 0;
+}
+
 .auth-actions {
   display: flex;
   justify-content: center;
diff --git a/app/components/auth.tsx b/app/components/auth.tsx
index 57118349bac..539a52eecc9 100644
--- a/app/components/auth.tsx
+++ b/app/components/auth.tsx
@@ -1,21 +1,35 @@
 import styles from "./auth.module.scss";
 import { IconButton } from "./button";
-
+import { useState, useEffect } from "react";
 import { useNavigate } from "react-router-dom";
-import { Path } from "../constant";
+import { Path, SAAS_CHAT_URL } from "../constant";
 import { useAccessStore } from "../store";
 import Locale from "../locales";
-
+import Delete from "../icons/close.svg";
+import Arrow from "../icons/arrow.svg";
+import Logo from "../icons/logo.svg";
+import { useMobileScreen } from "@/app/utils";
 import BotIcon from "../icons/bot.svg";
-import { useEffect } from "react";
 import { getClientConfig } from "../config/client";
+import { PasswordInput } from "./ui-lib";
+import LeftIcon from "@/app/icons/left.svg";
+import { safeLocalStorage } from "@/app/utils";
+import {
+  trackSettingsPageGuideToCPaymentClick,
+  trackAuthorizationPageButtonToCPaymentClick,
+} from "../utils/auth-settings-events";
+const storage = safeLocalStorage();

 export function AuthPage() {
   const navigate = useNavigate();
   const accessStore = useAccessStore();
-  const goHome = () => navigate(Path.Home);
   const goChat = () => navigate(Path.Chat);
+  const goSaas = () => {
+    trackAuthorizationPageButtonToCPaymentClick();
+    window.location.href = SAAS_CHAT_URL;
+  };
+
   const resetAccessCode = () => {
     accessStore.update((access) => {
       access.openaiApiKey = "";
       access.accessCode = "";
@@ -32,6 +46,14 @@ export function AuthPage() {

   return (
     <div className={styles["auth-page"]}>
+      <TopBanner></TopBanner>
+      <div className={styles["auth-header"]}>
+        <IconButton
+          icon={<LeftIcon />}
+          text={Locale.Auth.Return}
+          onClick={() => navigate(Path.Home)}
+        ></IconButton>
+      </div>
       <div className={`no-dark ${styles["auth-logo"]}`}>
         <BotIcon />
       </div>
@@ -39,36 +61,43 @@ export function AuthPage() {
       <div className={styles["auth-title"]}>{Locale.Auth.Title}</div>
       <div className={styles["auth-tips"]}>{Locale.Auth.Tips}</div>

-      <input
-        className={styles["auth-input"]}
+      <PasswordInput
+        style={{ marginTop: "3vh", marginBottom: "3vh" }}
+        aria={Locale.Settings.ShowPassword}
+        aria-label={Locale.Auth.Input}
+        value={accessStore.accessCode}
         type="password"
         placeholder={Locale.Auth.Input}
-        value={accessStore.accessCode}
         onChange={(e) => {
           accessStore.update(
             (access) => (access.accessCode = e.currentTarget.value),
           );
         }}
       />
+
       {!accessStore.hideUserApiKey ? (
         <>
           <div className={styles["auth-tips"]}>{Locale.Auth.SubTips}</div>
-          <input
-            className={styles["auth-input"]}
+          <PasswordInput
+            style={{ marginTop: "3vh", marginBottom: "3vh" }}
+            aria={Locale.Settings.ShowPassword}
+            aria-label={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
+            value={accessStore.openaiApiKey}
             type="password"
             placeholder={Locale.Settings.Access.OpenAI.ApiKey.Placeholder}
-            value={accessStore.openaiApiKey}
             onChange={(e) => {
              accessStore.update(
                 (access) => (access.openaiApiKey = e.currentTarget.value),
               );
             }}
           />
-          <input
-            className={styles["auth-input"]}
+          <PasswordInput
+            style={{ marginTop: "3vh", marginBottom: "3vh" }}
+            aria={Locale.Settings.ShowPassword}
+            aria-label={Locale.Settings.Access.Google.ApiKey.Placeholder}
+            value={accessStore.googleApiKey}
             type="password"
             placeholder={Locale.Settings.Access.Google.ApiKey.Placeholder}
-            value={accessStore.googleApiKey}
             onChange={(e) => {
               accessStore.update(
                 (access) => (access.googleApiKey = e.currentTarget.value),
@@ -85,13 +114,74 @@ export function AuthPage() {
           onClick={goChat}
         />
         <IconButton
-          text={Locale.Auth.Later}
+          text={Locale.Auth.SaasTips}
           onClick={() => {
-            resetAccessCode();
-            goHome();
+            goSaas();
           }}
         />
       </div>
     </div>
   );
 }
+
+function TopBanner() {
+  const [isHovered, setIsHovered] = useState(false);
+  const [isVisible, setIsVisible] = useState(true);
+  const isMobile = useMobileScreen();
+  useEffect(() => {
+    // check whether a dismissal flag exists in localStorage
+    const bannerDismissed = storage.getItem("bannerDismissed");
+    // if the flag is missing, store the default value and show the banner
+    if (!bannerDismissed) {
+      storage.setItem("bannerDismissed", "false");
+      setIsVisible(true); // show the banner
+    } else if (bannerDismissed === "true") {
+      // if the flag is "true", hide the banner
+      setIsVisible(false);
+    }
+  }, []);
+
+  const handleMouseEnter = () => {
+    setIsHovered(true);
+  };
+
+  const handleMouseLeave = () => {
+    setIsHovered(false);
+  };
+
+  const handleClose = () => {
+    setIsVisible(false);
+    storage.setItem("bannerDismissed", "true");
+  };
+
+  if (!isVisible) {
+    return null;
+  }
+  return (
+    <div
+      className={styles["top-banner"]}
+      onMouseEnter={handleMouseEnter}
+      onMouseLeave={handleMouseLeave}
+    >
+      <div className={`${styles["top-banner-inner"]} no-dark`}>
+        <Logo className={styles["top-banner-logo"]} />
+        <span>
+          {Locale.Auth.TopTips}
+          <a
+            href={SAAS_CHAT_URL}
+            rel="noopener noreferrer"
+            onClick={() => {
+              trackSettingsPageGuideToCPaymentClick();
+            }}
+          >
+            {Locale.Settings.Access.SaasStart.ChatNow}
+            <Arrow style={{ marginLeft: "4px" }} />
+          </a>
+        </span>
+      </div>
+      {(isHovered || isMobile) && (
+        <Delete className={styles["top-banner-close"]} onClick={handleClose} />
+      )}
+    </div>
+  );
+}
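[Editor's note: `TopBanner` persists its dismissal state through the `safeLocalStorage` wrapper, so the banner stays hidden across reloads once closed, and storage failures (e.g. disabled localStorage) cannot crash rendering. A minimal standalone sketch of the same pattern — the helper names are illustrative; only the `"bannerDismissed"` key comes from this diff:]

```ts
// Illustrative sketch of a dismissible, localStorage-backed banner flag.
// `createDismissalFlag` is a hypothetical helper, not part of NextChat.
function safeStorage(): Pick<Storage, "getItem" | "setItem"> {
  try {
    localStorage.setItem("__probe__", "1");
    localStorage.removeItem("__probe__");
    return localStorage;
  } catch {
    // fall back to an in-memory map when localStorage is unavailable
    const mem = new Map<string, string>();
    return {
      getItem: (k) => mem.get(k) ?? null,
      setItem: (k, v) => void mem.set(k, v),
    };
  }
}

function createDismissalFlag(key: string) {
  const storage = safeStorage();
  return {
    // visible unless the stored flag is exactly "true"
    isVisible: () => storage.getItem(key) !== "true",
    dismiss: () => storage.setItem(key, "true"),
  };
}

// Usage mirroring TopBanner: hide permanently once the user closes it.
const banner = createDismissalFlag("bannerDismissed");
if (banner.isVisible()) {
  console.log("render banner");
}
banner.dismiss(); // subsequent reloads skip the banner
```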
diff --git a/app/components/button.module.scss b/app/components/button.module.scss
index e332df2d2c8..05248bee812 100644
--- a/app/components/button.module.scss
+++ b/app/components/button.module.scss
@@ -5,7 +5,6 @@
   align-items: center;
   justify-content: center;
   padding: 10px;
-  cursor: pointer;
   transition: all 0.3s ease;
   overflow: hidden;

diff --git a/app/components/chat-list.tsx b/app/components/chat-list.tsx
index 7ef6e7b8337..03b1a5c8803 100644
--- a/app/components/chat-list.tsx
+++ b/app/components/chat-list.tsx
@@ -1,5 +1,4 @@
 import DeleteIcon from "../icons/delete.svg";
-import BotIcon from "../icons/bot.svg";

 import styles from "./home.module.scss";

@@ -12,7 +11,7 @@ import {
 import { useChatStore } from "../store";

 import Locale from "../locales";
-import { Link, useLocation, useNavigate } from "react-router-dom";
+import { useLocation, useNavigate } from "react-router-dom";
 import { Path } from "../constant";
 import { MaskAvatar } from "./mask";
 import { Mask } from "../store/mask";
diff --git a/app/components/chat.tsx b/app/components/chat.tsx
index 3cc02d48672..3d5b6a4f2c4 100644
--- a/app/components/chat.tsx
+++ b/app/components/chat.tsx
@@ -15,6 +15,8 @@ import RenameIcon from "../icons/rename.svg";
 import ExportIcon from "../icons/share.svg";
 import ReturnIcon from "../icons/return.svg";
 import CopyIcon from "../icons/copy.svg";
+import SpeakIcon from "../icons/speak.svg";
+import SpeakStopIcon from "../icons/speak-stop.svg";
 import LoadingIcon from "../icons/three-dots.svg";
 import LoadingButtonIcon from "../icons/loading.svg";
 import PromptIcon from "../icons/prompt.svg";
@@ -96,7 +98,8 @@ import {
 import { useNavigate } from "react-router-dom";
 import {
   CHAT_PAGE_SIZE,
-  LAST_INPUT_KEY,
+  DEFAULT_TTS_ENGINE,
+  ModelProvider,
   Path,
   REQUEST_TIMEOUT_MS,
   UNFINISHED_INPUT,
@@ -112,8 +115,16 @@ import { getClientConfig } from "../config/client";
 import { useAllModels } from "../utils/hooks";
 import { MultimodalContent } from "../client/api";
+import { ClientApi } from "../client/api";
+import { createTTSPlayer } from "../utils/audio";
+import { MsEdgeTTS, OUTPUT_FORMAT } from "../utils/ms_edge_tts";
+
+import { isEmpty } from "lodash-es";
+
 const localStorage = safeLocalStorage();

+const ttsPlayer = createTTSPlayer();
+
 const Markdown = dynamic(async () => (await import("./markdown")).Markdown, {
   loading: () => <LoadingIcon />,
 });
@@ -443,6 +454,7 @@ export function ChatActions(props: {
   hitBottom: boolean;
   uploading: boolean;
   setShowShortcutKeyModal: React.Dispatch<React.SetStateAction<boolean>>;
+  setUserInput: (input: string) => void;
 }) {
   const config = useAppConfig();
   const navigate = useNavigate();
@@ -981,6 +993,7 @@ function _Chat() {
     chatStore.updateCurrentSession(
       (session) => (session.clearContextIndex = session.messages.length),
     ),
+    fork: () => chatStore.forkSession(),
     del: () => chatStore.deleteSession(chatStore.currentSessionIndex),
   });

@@ -1005,7 +1018,7 @@ function _Chat() {
   };

   const doSubmit = (userInput: string) => {
-    if (userInput.trim() === "") return;
+    if (userInput.trim() === "" && isEmpty(attachImages)) return;
     const matchCommand = chatCommands.match(userInput);
     if (matchCommand.matched) {
       setUserInput("");
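[Editor's note: the reworked `doSubmit` guard permits image-only messages — input is rejected only when the text is empty *and* no images are attached. A reduced sketch of the predicate, using lodash-es `isEmpty` as in the diff; the function name is hypothetical:]

```ts
import { isEmpty } from "lodash-es";

// Hypothetical reduced form of the submit guard in chat.tsx:
// block submission only when there is neither text nor attached images.
function canSubmit(userInput: string, attachImages: string[]): boolean {
  return userInput.trim() !== "" || !isEmpty(attachImages);
}

console.log(canSubmit("", [])); // false — nothing to send
console.log(canSubmit("", ["data:image/png;base64,..."])); // true — image-only message
```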
@@ -1184,10 +1197,55 @@ function _Chat() {
+  const accessStore = useAccessStore();
+  const [speechStatus, setSpeechStatus] = useState(false);
+  const [speechLoading, setSpeechLoading] = useState(false);
+  async function openaiSpeech(text: string) {
+    if (speechStatus) {
+      ttsPlayer.stop();
+      setSpeechStatus(false);
+    } else {
+      var api: ClientApi;
+      api = new ClientApi(ModelProvider.GPT);
+      const config = useAppConfig.getState();
+      setSpeechLoading(true);
+      ttsPlayer.init();
+      let audioBuffer: ArrayBuffer;
+      const { markdownToTxt } = require("markdown-to-txt");
+      const textContent = markdownToTxt(text);
+      if (config.ttsConfig.engine !== DEFAULT_TTS_ENGINE) {
+        const edgeVoiceName = accessStore.edgeVoiceName();
+        const tts = new MsEdgeTTS();
+        await tts.setMetadata(
+          edgeVoiceName,
+          OUTPUT_FORMAT.AUDIO_24KHZ_96KBITRATE_MONO_MP3,
+        );
+        audioBuffer = await tts.toArrayBuffer(textContent);
+      } else {
+        audioBuffer = await api.llm.speech({
+          model: config.ttsConfig.model,
+          input: textContent,
+          voice: config.ttsConfig.voice,
+          speed: config.ttsConfig.speed,
+        });
+      }
+      setSpeechStatus(true);
+      ttsPlayer
+        .play(audioBuffer, () => {
+          setSpeechStatus(false);
+        })
+        .catch((e) => {
+          console.error("[OpenAI Speech]", e);
+          showToast(prettyObject(e));
+          setSpeechStatus(false);
+        })
+        .finally(() => setSpeechLoading(false));
+    }
+  }
+
   const context: RenderMessage[] = useMemo(() => {
     return session.mask.hideContext ? [] : session.mask.context.slice();
   }, [session.mask.context, session.mask.hideContext]);

-  const accessStore = useAccessStore();

   if (
     context.length === 0 &&
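[Editor's note: `openaiSpeech` routes text-to-speech between two backends — the Edge TTS engine (`MsEdgeTTS`) when the configured engine is not the default, and the OpenAI-compatible `speech` endpoint otherwise — with both paths resolving to an `ArrayBuffer` for the shared player. A simplified sketch of that selection, under hypothetical names and types, not the repo's actual classes:]

```ts
// Illustrative sketch: route TTS to one of two backends that both
// resolve to a playable ArrayBuffer. All names here are hypothetical.
interface TTSBackend {
  synthesize(text: string): Promise<ArrayBuffer>;
}

class EdgeTTSBackend implements TTSBackend {
  constructor(private voice: string) {}
  async synthesize(text: string): Promise<ArrayBuffer> {
    // stand-in for MsEdgeTTS.setMetadata(...) + toArrayBuffer(...)
    console.log(`edge-tts voice=${this.voice}: ${text}`);
    return new ArrayBuffer(0);
  }
}

class OpenAITTSBackend implements TTSBackend {
  constructor(private model: string, private voice: string) {}
  async synthesize(text: string): Promise<ArrayBuffer> {
    // stand-in for api.llm.speech({ model, input, voice, speed })
    console.log(`openai-tts model=${this.model} voice=${this.voice}: ${text}`);
    return new ArrayBuffer(0);
  }
}

function pickBackend(engine: string, defaultEngine: string): TTSBackend {
  return engine !== defaultEngine
    ? new EdgeTTSBackend("en-US-AriaNeural")
    : new OpenAITTSBackend("tts-1", "alloy");
}

// Usage mirrors the diff: non-default engines go through Edge TTS.
pickBackend("edge-tts", "openai-tts")
  .synthesize("Hello from NextChat")
  .then((buf) => console.log("got", buf.byteLength, "bytes"));
```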
@@ -1724,6 +1782,25 @@ function _Chat() {
                           )
                         }
                       />
+                      {config.ttsConfig.enable && (
+                        <ChatAction
+                          text={
+                            speechStatus
+                              ? Locale.Chat.Actions.StopSpeech
+                              : Locale.Chat.Actions.Speech
+                          }
+                          icon={
+                            speechStatus ? (
+                              <SpeakStopIcon />
+                            ) : (
+                              <SpeakIcon />
+                            )
+                          }
+                          onClick={() =>
+                            openaiSpeech(getMessageTextContent(message))
+                          }
+                        />
+                      )}
                     </div>
                   </div>
                 )}
@@ -1741,6 +1818,7 @@ function _Chat() {
                 {message?.tools?.map((tool) => (
                   <div
                     key={tool.id}
+                    title={tool?.errorMsg}
                     className={styles["chat-message-tool"]}
                   >
                     {tool.isError === false ? (
@@ -1842,6 +1920,7 @@ function _Chat() {
               onSearch("");
             }}
             setShowShortcutKeyModal={setShowShortcutKeyModal}
+            setUserInput={setUserInput}
           />