Prepare release 1.0 #32

Merged · 4 commits · Nov 22, 2024
2 changes: 0 additions & 2 deletions .github/workflows/ci.yml
```diff
@@ -19,8 +19,6 @@ jobs:

       - name: Setup pnpm
         uses: pnpm/[email protected]
-        with:
-          version: 9.6.0

       - name: Use Node.js ${{ matrix.node-version }}
         uses: actions/setup-node@v2
```
6 changes: 0 additions & 6 deletions .github/workflows/quality.yml
```diff
@@ -16,8 +16,6 @@ jobs:

       - name: Setup pnpm
         uses: pnpm/[email protected]
-        with:
-          version: 9.6.0

       - name: Use Node.js 18
         uses: actions/setup-node@v3
@@ -40,8 +38,6 @@ jobs:

       - name: Setup pnpm
         uses: pnpm/[email protected]
-        with:
-          version: 9.6.0

       - name: Use Node.js 18
         uses: actions/setup-node@v3
@@ -64,8 +60,6 @@ jobs:

       - name: Setup pnpm
         uses: pnpm/[email protected]
-        with:
-          version: 9.6.0

       - name: Use Node.js 18
         uses: actions/setup-node@v3
```
8 changes: 4 additions & 4 deletions examples/ai-core/package.json
```diff
@@ -4,10 +4,10 @@
   "private": true,
   "scripts": {},
   "dependencies": {
-    "@opentelemetry/sdk-node": "0.52.0",
+    "@opentelemetry/sdk-node": "0.54.2",
     "@opentelemetry/auto-instrumentations-node": "0.47.0",
-    "@opentelemetry/sdk-trace-node": "1.25.0",
-    "ai": "latest",
+    "@opentelemetry/sdk-trace-node": "1.27.0",
+    "ai": "^4.0.0",
     "commander": "^12.1.0",
     "mathjs": "12.4.2",
     "ollama-ai-provider": "workspace:latest",
@@ -17,6 +17,6 @@
   "devDependencies": {
     "@types/node": "20.11.20",
     "tsx": "4.7.1",
-    "typescript": "5.5.4"
+    "typescript": "5.6.3"
   }
 }
```
2 changes: 1 addition & 1 deletion examples/ai-core/src/generate-object/ollama-multimodal.ts
```diff
@@ -33,4 +33,4 @@ async function main(model: Parameters<typeof ollama>[0]) {
   console.log(JSON.stringify(object.artwork, null, 2))
 }

-buildProgram('llava:13b', main).catch(console.error)
+buildProgram('llama3.2-vision', main).catch(console.error)
```
26 changes: 26 additions & 0 deletions examples/ai-core/src/generate-text/ollama-multi-step-continue.ts
```diff
@@ -0,0 +1,26 @@
+#! /usr/bin/env -S pnpm tsx
+
+import { generateText } from 'ai'
+import { ollama } from 'ollama-ai-provider'
+
+import { buildProgram } from '../tools/command'
+
+async function main(model: Parameters<typeof ollama>[0]) {
+  const { steps, text, usage } = await generateText({
+    experimental_continueSteps: true, // 4096 output tokens
+    maxSteps: 5,
+    model: ollama(model),
+    prompt:
+      'Write a book about Roman history, ' +
+      'from the founding of the city of Rome ' +
+      'to the fall of the Western Roman Empire. ' +
+      'Each chapter MUST HAVE at least 1000 words.',
+  })
+
+  console.log(text)
+  console.log()
+  console.log('Usage:', usage)
+  console.log('# of steps:', steps.length)
+}
+
+buildProgram('qwen2.5', main).catch(console.error)
```
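Usage note: like the other scripts in this directory, this should be runnable from examples/ai-core with `pnpm tsx src/generate-text/ollama-multi-step-continue.ts`, with `buildProgram` presumably accepting an alternative model id on the command line.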
2 changes: 1 addition & 1 deletion examples/ai-core/src/generate-text/ollama-multi-step.ts
```diff
@@ -7,7 +7,7 @@ import { z } from 'zod'
 import { buildProgram } from '../tools/command'

 async function main(model: Parameters<typeof ollama>[0]) {
-  const { text, usage } = await generateText({
+  await generateText({
     maxSteps: 5,
     model: ollama(model),
     onStepFinish: (step) => {
```
4 changes: 2 additions & 2 deletions examples/ai-core/src/registry/setup-registry.ts
```diff
@@ -7,8 +7,8 @@ import { ollama } from 'ollama-ai-provider'

 const myOllama = customProvider({
   fallbackProvider: ollama,
   languageModels: {
-    multimodal: ollama('llava'),
-    text: ollama('llama3.1'),
+    multimodal: ollama('llama3.2-vision'),
+    text: ollama('llama3.2'),
   },
 })
```
86 changes: 0 additions & 86 deletions examples/ai-core/src/stream-text/ollama-chatbot-with-tools.ts

This file was deleted.

24 changes: 19 additions & 5 deletions examples/ai-core/src/stream-text/ollama-chatbot.ts
```diff
@@ -2,8 +2,9 @@

 import * as readline from 'node:readline/promises'

-import { CoreMessage, streamText } from 'ai'
+import { CoreMessage, streamText, tool } from 'ai'
 import { ollama } from 'ollama-ai-provider'
+import { z } from 'zod'

 import { buildProgram } from '../tools/command'

@@ -21,20 +22,33 @@ async function main(model: Parameters<typeof ollama>[0]) {
     messages.push({ content: userInput, role: 'user' })

     const result = await streamText({
+      maxSteps: 5,
       messages,
       model: ollama(model),
       system: `You are a helpful, respectful and honest assistant.`,
+      tools: {
+        weather: tool({
+          description: 'Get the weather in a location',
+          execute: async ({ location }) => ({
+            location,
+            temperature: 72 + Math.floor(Math.random() * 21) - 10,
+          }),
+          parameters: z.object({
+            location: z
+              .string()
+              .describe('The location to get the weather for'),
+          }),
+        }),
+      },
     })

-    let fullResponse = ''
     process.stdout.write('\nAssistant: ')
     for await (const delta of result.textStream) {
-      fullResponse += delta
       process.stdout.write(delta)
     }
     process.stdout.write('\n\n')

-    messages.push({ content: fullResponse, role: 'assistant' })
+    // eslint-disable-next-line unicorn/no-await-expression-member
+    messages.push(...(await result.response).messages)
   }
 }
```
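Two behavioral notes on this change: `maxSteps: 5` lets the loop continue past a tool call so the model can answer after the weather tool returns, and pushing `(await result.response).messages` keeps the assistant's tool-call and tool-result messages in the history, which the old `fullResponse` string accumulation silently dropped.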
4 changes: 2 additions & 2 deletions examples/ai-core/src/tools/command.ts
```diff
@@ -3,8 +3,8 @@ import { ollama } from 'ollama-ai-provider'

 export async function buildProgram(
   defaultModel:
-    | Parameters<typeof ollama>[0]
-    | Parameters<typeof ollama.embedding>[0],
+    | Parameters<typeof ollama.languageModel>[0]
+    | Parameters<typeof ollama.textEmbeddingModel>[0],
   action: (model: string) => Promise<void>,
 ) {
   const program = new Command()
```
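The rename tracks the AI SDK v4 provider interface, where `languageModel` and `textEmbeddingModel` replace the old `embedding` accessor. A minimal sketch of the two entry points after this change (the model ids are illustrative and assume they are pulled locally):

```ts
import { embed, generateText } from 'ai'
import { ollama } from 'ollama-ai-provider'

// ollama.languageModel() is equivalent to calling ollama() directly;
// ollama.textEmbeddingModel() replaces the old ollama.embedding().
const { text } = await generateText({
  model: ollama.languageModel('llama3.2'),
  prompt: 'Say hello in one sentence.',
})

const { embedding } = await embed({
  model: ollama.textEmbeddingModel('nomic-embed-text'), // illustrative id
  value: 'hello world',
})
```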
5 changes: 4 additions & 1 deletion package.json
```diff
@@ -37,6 +37,9 @@
     "vite-tsconfig-paths": "^4.3.2",
     "vitest": "^2.1.3"
   },
+  "engines": {
+    "node": "^18.0.0 || ^20.0.0"
+  },
   "homepage": "https://github.com/sgomez/ollama-ai-provider",
   "repository": {
     "type": "git",
@@ -48,5 +51,5 @@
   "keywords": [
     "ai"
   ],
-  "packageManager": "pnpm@9.6.0"
+  "packageManager": "pnpm@9.12.3"
 }
```
6 changes: 3 additions & 3 deletions packages/ollama/package.json
```diff
@@ -26,15 +26,15 @@
   "author": "Sergio Gómez Bachiller <[email protected]>",
   "license": "Apache-2.0",
   "dependencies": {
-    "@ai-sdk/provider": "0.0.26",
-    "@ai-sdk/provider-utils": "1.0.22",
+    "@ai-sdk/provider": "^1.0.0",
+    "@ai-sdk/provider-utils": "^2.0.0",
     "partial-json": "0.1.7"
   },
   "devDependencies": {
     "@edge-runtime/vm": "^3.2.0",
     "@types/node": "^18.19.56",
     "tsup": "^8.3.0",
-    "typescript": "5.5.4",
+    "typescript": "5.6.3",
     "zod": "3.23.8"
   },
   "peerDependencies": {
```
1 change: 0 additions & 1 deletion packages/ollama/src/index.ts
```diff
@@ -1,3 +1,2 @@
-export * from './ollama-facade'
 export type { OllamaProvider, OllamaProviderSettings } from './ollama-provider'
 export { createOllama, ollama } from './ollama-provider'
```
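Removing the `ollama-facade` barrel export narrows the public API to the provider entry points above; code that imported from the facade module will need to switch to `ollama` or `createOllama`.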
4 changes: 2 additions & 2 deletions packages/ollama/src/ollama-chat-language-model.ts
```diff
@@ -98,10 +98,10 @@ export class OllamaChatLanguageModel implements LanguageModelV1 {
       repeat_last_n: this.settings.repeatLastN,
       repeat_penalty: this.settings.repeatPenalty,
       seed,
-      stop: this.settings.stop ?? stopSequences,
+      stop: stopSequences,
       temperature,
       tfs_z: this.settings.tfsZ,
-      top_k: this.settings.topK ?? topK,
+      top_k: topK,
       top_p: topP,
       typical_p: this.settings.typicalP,
       use_mlock: this.settings.useMlock,
```
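With the deprecated `stop` and `topK` provider settings no longer consulted here (they are removed from the settings type in the next file), these options can only come in through the standard AI SDK call settings. A minimal sketch, assuming a locally pulled `llama3.2`:

```ts
import { generateText } from 'ai'
import { ollama } from 'ollama-ai-provider'

// stopSequences and topK are standard AI SDK call settings; they are
// forwarded to Ollama's `stop` and `top_k` request options.
const { text } = await generateText({
  model: ollama('llama3.2'),
  prompt: 'List the first ten Roman emperors.',
  stopSequences: ['\n\n'],
  topK: 40,
})

console.log(text)
```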
28 changes: 12 additions & 16 deletions packages/ollama/src/ollama-chat-settings.ts
```diff
@@ -1,4 +1,6 @@
 export type OllamaChatModelId =
+  | 'athene-v2'
+  | 'athene-v2:72b'
   | 'aya-expanse'
   | 'aya-expanse:8b'
   | 'aya-expanse:32b'
@@ -92,6 +94,9 @@ export type OllamaChatModelId =
   | 'nemotron:70b'
   | 'nemotron-mini'
   | 'nemotron-mini:4b'
+  | 'opencoder'
+  | 'opencoder:1.5b'
+  | 'opencoder:8b'
   | 'phi3'
   | 'phi3:3.8b'
   | 'phi3:14b'
@@ -117,8 +122,12 @@ export type OllamaChatModelId =
   | 'qwen2.5:32b'
   | 'qwen2.5:72b'
   | 'qwen2.5-coder'
+  | 'qwen2.5-coder:0.5b'
+  | 'qwen2.5-coder:1.5b'
+  | 'qwen2.5-coder:3b'
   | 'qwen2.5-coder:7b'
+  | 'qwen2.5-coder:14b'
+  | 'qwen2.5-coder:32b'
   | 'shieldgemma'
   | 'shieldgemma:2b'
   | 'shieldgemma:9b'
@@ -127,6 +136,9 @@ export type OllamaChatModelId =
   | 'smollm:135m'
   | 'smollm:360m'
   | 'smollm:1.7b'
+  | 'tulu3'
+  | 'tulu3:8b'
+  | 'tulu3:70b'
   | (string & NonNullable<unknown>)

 export interface OllamaChatSettings {
@@ -224,28 +236,12 @@ export interface OllamaChatSettings {
    */
   repeatPenalty?: number

-  /**
-   * Sets the stop sequences to use. When this pattern is encountered the LLM will stop generating text and return.
-   * Multiple stop patterns may be set by specifying multiple separate `stop` parameters in a modelfile.
-   *
-   * @deprecated Use `stopSequences` from AI SDK functions.
-   */
-  stop?: string
-
   /**
    * Tail free sampling is used to reduce the impact of less probable tokens from the output. A higher value (e.g., 2.0)
    * will reduce the impact more, while a value of 1.0 disables this setting. (default: 1)
    */
   tfsZ?: number

-  /**
-   * Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a
-   * lower value (e.g. 10) will be more conservative. (Default: 40)
-   *
-   * @deprecated Use `topK` from AI SDK functions.
-   */
-  topK?: number
-
   /**
    * Controls the "typical" sampling probability. (Default: 1.0)
    */
```
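The model id list is only a typing aid: the trailing `(string & NonNullable<unknown>)` keeps the union open, so ids not in the list still type-check. A quick sketch of what this permits:

```ts
import { ollama } from 'ollama-ai-provider'

// Newly listed ids such as 'tulu3' or 'qwen2.5-coder:32b' now autocomplete,
// but any tag available in the local Ollama install is also accepted.
const coder = ollama('qwen2.5-coder:32b')
const custom = ollama('my-finetune:latest') // hypothetical local tag
```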