diff --git a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts
index 0d47f6eac..5bd8748bc 100644
--- a/packages/sdk/server-ai/src/api/config/LDAIConfig.ts
+++ b/packages/sdk/server-ai/src/api/config/LDAIConfig.ts
@@ -9,6 +9,16 @@ export interface LDModelConfig {
    */
   modelId?: string;
 
+  /**
+   * The sampling temperature of the model.
+   */
+  temperature?: number;
+
+  /**
+   * The maximum number of tokens to generate.
+   */
+  maxTokens?: number;
+
   /**
    * And additional model specific information.
    */