From 978dfa95d1c25f942d96b730b187f92af045f90f Mon Sep 17 00:00:00 2001 From: Ryan Lamb <4955475+kinyoklion@users.noreply.github.com> Date: Tue, 12 Nov 2024 13:07:30 -0800 Subject: [PATCH] fix: Update default typings to include enabled. (#680) BEGIN_COMMIT_OVERRIDE fix: Update default typings to include enabled. feat: Include temperature and maxTokens in LDModelConfig. END_COMMIT_OVERRIDE --- packages/sdk/server-ai/src/api/LDAIClient.ts | 13 +++++++++++-- packages/sdk/server-ai/src/api/config/LDAIConfig.ts | 11 +++++++++++ 2 files changed, 22 insertions(+), 2 deletions(-) diff --git a/packages/sdk/server-ai/src/api/LDAIClient.ts b/packages/sdk/server-ai/src/api/LDAIClient.ts index 990485819..cffd657a7 100644 --- a/packages/sdk/server-ai/src/api/LDAIClient.ts +++ b/packages/sdk/server-ai/src/api/LDAIClient.ts @@ -3,9 +3,18 @@ import { LDContext } from '@launchdarkly/js-server-sdk-common'; import { LDAIConfig, LDGenerationConfig } from './config/LDAIConfig'; /** - * Interface for performing AI operations using LaunchDarkly. + * Interface for default model configuration. */ +export interface LDAIDefaults extends LDGenerationConfig { + /** + * Whether the configuration is enabled. + */ + enabled?: boolean; +} +/** + * Interface for performing AI operations using LaunchDarkly. + */ export interface LDAIClient { /** * Parses and interpolates a template string with the provided variables. @@ -68,7 +77,7 @@ export interface LDAIClient { * } * ``` */ - modelConfig( + modelConfig( key: string, context: LDContext, defaultValue: TDefault, diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts index 0d47f6eac..432a0a732 100644 --- a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts +++ b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts @@ -9,6 +9,17 @@ export interface LDModelConfig { */ modelId?: string; + /** + * Tuning parameter for randomness versus determinism.
Exact effect will be determined by the + * model. */ + temperature?: number; + /** + * The maximum number of tokens. */ + maxTokens?: number; + /** * And additional model specific information. */