From 067a3880fb343267b34d9e03ba57797e7a49a20f Mon Sep 17 00:00:00 2001 From: yeus Date: Fri, 24 May 2024 21:48:23 -0700 Subject: [PATCH] slightly refactor chatTask processing... --- src/modules/taskyon/taskWorker.ts | 68 +++++++++++++++---------------- 1 file changed, 33 insertions(+), 35 deletions(-) diff --git a/src/modules/taskyon/taskWorker.ts b/src/modules/taskyon/taskWorker.ts index 8f5afb3f..a3b6cd26 100644 --- a/src/modules/taskyon/taskWorker.ts +++ b/src/modules/taskyon/taskWorker.ts @@ -72,15 +72,11 @@ function extractOpenAIFunctions( export async function processChatTask( task: LLMTask, chatState: ChatStateType, - apiKeys: Record, + apiKey: string, taskManager: TyTaskManager, taskWorkerController: TaskWorkerController ) { // TODO: refactor this function! - let apiKey = apiKeys[chatState.selectedApi]; - if (!apiKey || apiKey.trim() === '') { - apiKey = apiKeys['taskyon'] || 'free'; - } // TODO: can we interrupt non-streaming tasks? possibly using an AbortController. //TODO: merge this function with the assistants function if (chatState.useOpenAIAssistants && chatState.selectedApi == 'openai') { @@ -199,36 +195,37 @@ export async function processChatTask( }); }); } else if (chatCompletion && chatState.selectedApi === 'taskyon') { - // TODO: cancel this section, if we're not logged in to taskyon... 
- void sleep(5000).then(() => { - const headers = generateHeaders( - apiKey, - chatState.siteUrl, - api.name - ); - const baseUrl = new URL(api.baseURL).origin; - console.log('get generation info from ', baseUrl); - const url = `${baseUrl}/rest/v1/api_usage_log?select=reference_data&id=eq.${chatCompletion.id}`; - void fetch(url, { headers }) - .then((response) => { - if (!response.ok) { - throw new Error( - `Could not find generation information for task ${task.id}` + if (apiKey) { + void sleep(5000).then(() => { + const headers = generateHeaders( + apiKey, + chatState.siteUrl, + api.name + ); + const baseUrl = new URL(api.baseURL).origin; + console.log('get generation info from ', baseUrl); + const url = `${baseUrl}/rest/v1/api_usage_log?select=reference_data&id=eq.${chatCompletion.id}`; + void fetch(url, { headers }) + .then((response) => { + if (!response.ok) { + throw new Error( + `Could not find generation information for task ${task.id}` + ); + } + return response.json() as Promise< + { reference_data: OpenRouterGenerationInfo }[] + >; + }) + .then((data) => { + console.log('taskyon generation info:', data); + enrichWithDelayedUsageInfos( + task, + taskManager, + data[0].reference_data ); - } - return response.json() as Promise< - { reference_data: OpenRouterGenerationInfo }[] - >; - }) - .then((data) => { - console.log('taskyon generation info:', data); - enrichWithDelayedUsageInfos( - task, - taskManager, - data[0].reference_data - ); - }); - }); + }); + }); + } } else if (chatCompletion?.usage) { // openai sends back the exact number of prompt tokens :) task.debugging.promptTokens = chatCompletion.usage.prompt_tokens; @@ -546,10 +543,11 @@ async function processTask( if ('message' in task.content || 'functionResult' in task.content) { // TODO: get rid of "taskManager" in processChatTask + const apiKey = apiKeys[chatState.selectedApi]; task = await processChatTask( task, chatState, - apiKeys, + apiKey, taskManager, taskWorkerController );