Skip to content

Commit

Permalink
chore: migrated to xsai
Browse files Browse the repository at this point in the history
  • Loading branch information
nekomeowww committed Dec 3, 2024
1 parent bc04dba commit 6f57d5f
Show file tree
Hide file tree
Showing 4 changed files with 77 additions and 50 deletions.
2 changes: 2 additions & 0 deletions packages/stage/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -39,6 +39,8 @@
"@unocss/reset": "^0.65.0",
"@vueuse/core": "^12.0.0",
"@vueuse/head": "^2.0.0",
"@xsai/shared-chat-completion": "^0.0.13",
"@xsai/stream-text": "^0.0.13",
"ai": "^4.0.10",
"nprogress": "^0.2.0",
"ofetch": "^1.4.1",
Expand Down
55 changes: 32 additions & 23 deletions packages/stage/src/components/MainStage.vue
Original file line number Diff line number Diff line change
@@ -1,13 +1,9 @@
<script setup lang="ts">
import type {
CoreAssistantMessage,
CoreSystemMessage,
CoreUserMessage,
} from 'ai'
import type { AssistantMessage, Message, SystemMessage } from '@xsai/shared-chat-completion'
import type {
Emotion,
} from '../constants/emotions'
import { useLocalStorage } from '@vueuse/core'
import { storeToRefs } from 'pinia'
import { computed, onMounted, ref, watch } from 'vue'
Expand Down Expand Up @@ -42,7 +38,6 @@ const { elevenLabsApiKey, openAiApiBaseURL, openAiApiKey } = storeToRefs(useSett
const openAIModel = useLocalStorage<{ id: string, name?: string }>('openai-model', { id: 'openai/gpt-3.5-turbo', name: 'OpenAI GPT3.5 Turbo' })
const {
setupOpenAI,
streamSpeech,
stream,
models,
Expand All @@ -54,8 +49,8 @@ const listening = ref(false)
const live2DViewerRef = ref<{ setMotion: (motionName: string) => Promise<void> }>()
const supportedModels = ref<{ id: string, name?: string }[]>([])
const messageInput = ref<string>('')
const messages = ref<Array<CoreAssistantMessage | CoreUserMessage | CoreSystemMessage>>([SystemPromptV2 as CoreSystemMessage])
const streamingMessage = ref<CoreAssistantMessage>({ role: 'assistant', content: '' })
const messages = ref<Array<Message>>([SystemPromptV2 as SystemMessage])
const streamingMessage = ref<AssistantMessage>({ role: 'assistant', content: '' })
const audioAnalyser = ref<AnalyserNode>()
const mouthOpenSize = ref(0)
const nowSpeaking = ref(false)
Expand Down Expand Up @@ -183,6 +178,25 @@ function setupAnalyser() {
audioAnalyser.value = audioContext.createAnalyser()
}
/**
 * Adapts a `ReadableStream` into an async generator, mapping every chunk
 * through `func` before yielding it.
 *
 * Needed because `ReadableStream` does not reliably expose
 * `[Symbol.asyncIterator]()` across environments, so `for await` cannot be
 * used on it directly.
 * See: https://stackoverflow.com/questions/76700924/ts2504-type-readablestreamuint8array-must-have-a-symbol-asynciterator
 */
async function* asyncIteratorFromReadableStream<T, F = Uint8Array>(res: ReadableStream<F>, func: (value: F) => Promise<T>): AsyncGenerator<T, void, unknown> {
  const streamReader = res.getReader()
  try {
    for (;;) {
      const chunk = await streamReader.read()
      if (chunk.done)
        break
      yield await func(chunk.value)
    }
  }
  finally {
    // Always release the lock so the stream can be consumed or cancelled elsewhere.
    streamReader.releaseLock()
  }
}
async function onSendMessage(sendingMessage: string) {
if (!sendingMessage)
return
Expand All @@ -196,7 +210,7 @@ async function onSendMessage(sendingMessage: string) {
// const index = messages.value.length - 1
live2DViewerRef.value?.setMotion(EmotionThinkMotionName)
const res = await stream(model.value, messages.value.slice(0, messages.value.length - 1))
const res = await stream(openAiApiBaseURL.value, openAiApiKey.value, model.value, messages.value.slice(0, messages.value.length - 1))
enum States {
Literal = 'literal',
Expand All @@ -206,7 +220,7 @@ async function onSendMessage(sendingMessage: string) {
let state = States.Literal
let buffer = ''
for await (const textPart of res.textStream) {
for await (const textPart of asyncIteratorFromReadableStream(res.textStream, async v => v)) {
for (const textSingleChar of textPart) {
let newState: States = state
Expand Down Expand Up @@ -243,25 +257,20 @@ async function onSendMessage(sendingMessage: string) {
}
watch([openAiApiBaseURL, openAiApiKey], async ([baseUrl, apiKey]) => {
setupOpenAI({
apiKey,
baseURL: baseUrl,
})
if (!baseUrl || !apiKey) {
supportedModels.value = []
return
}
const fetchedModels = await models()
const fetchedModels = await models(baseUrl, apiKey)
supportedModels.value = fetchedModels.data
})
onMounted(async () => {
if (!openAiApiKey.value)
if (!openAiApiBaseURL.value || !openAiApiKey.value)
return
setupOpenAI({
apiKey: openAiApiKey.value,
baseURL: openAiApiBaseURL.value,
})
const fetchedModels = await models()
const fetchedModels = await models(openAiApiBaseURL.value, openAiApiKey.value)
supportedModels.value = fetchedModels.data
})
Expand Down
45 changes: 18 additions & 27 deletions packages/stage/src/stores/llm.ts
Original file line number Diff line number Diff line change
@@ -1,46 +1,39 @@
import type { GenerateAudioStream } from '@airi-proj/elevenlabs/types'
import type { CoreMessage } from 'ai'
import { createOpenAI, type OpenAIProvider, type OpenAIProviderSettings } from '@ai-sdk/openai'
import { streamText } from 'ai'
import type { Message } from '@xsai/shared-chat-completion'
import { streamText } from '@xsai/stream-text'
import { ofetch } from 'ofetch'
import { OpenAI } from 'openai'
import { defineStore } from 'pinia'
import { ref } from 'vue'

export const useLLM = defineStore('llm', () => {
const openAI = ref<OpenAI>()
const openAIProvider = ref<OpenAIProvider>()

function setupOpenAI(options: OpenAIProviderSettings) {
openAI.value = new OpenAI({ ...options, dangerouslyAllowBrowser: true })
openAIProvider.value = createOpenAI(options)
}

async function stream(model: string, messages: CoreMessage[]) {
if (!openAIProvider.value)
throw new Error('OpenAI not initialized')
if (openAI.value?.baseURL === '') {
throw new Error('OpenAI not initialized')
}

/**
 * Starts a streaming chat completion against an OpenAI-compatible endpoint.
 *
 * @param apiUrl base URL of the OpenAI-compatible API (`/chat/completions` is appended)
 * @param apiKey bearer token sent with the request
 * @param model model identifier to request
 * @param messages full conversation history to send
 * @returns the `streamText` result, whose `textStream` yields the response tokens
 */
async function stream(apiUrl: string, apiKey: string, model: string, messages: Message[]) {
  const request = {
    url: `${apiUrl}/chat/completions`,
    apiKey,
    model,
    messages,
    // Ask the server to include token-usage stats in the stream.
    streamOptions: { usage: true },
  }
  return streamText(request)
}

async function models() {
if (!openAI.value)
throw new Error('OpenAI not initialized')
if (openAI.value?.baseURL === '') {
async function models(apiUrl: string, apiKey: string) {
if (apiUrl === '') {
return {
data: [],
object: '',
}
}

try {
return await openAI.value.models.list()
const openai = new OpenAI({
apiKey,
baseURL: apiUrl,
dangerouslyAllowBrowser: true,
})

return await openai.models.list()
}
catch (err) {
if (String(err).includes(`Failed to construct 'URL': Invalid URL`)) {
Expand Down Expand Up @@ -74,8 +67,6 @@ export const useLLM = defineStore('llm', () => {
}

return {
setupOpenAI,
openAI,
models,
stream,
streamSpeech,
Expand Down
25 changes: 25 additions & 0 deletions pnpm-lock.yaml

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

0 comments on commit 6f57d5f

Please sign in to comment.