diff --git a/README.md b/README.md
index 111aa1f..14dfdda 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ pip install ollama
 
 ```python
 import ollama
-response = ollama.chat(model='llama3', messages=[
+response = ollama.chat(model='llama3.1', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -29,7 +29,7 @@ Response streaming can be enabled by setting `stream=True`, modifying function c
 import ollama
 
 stream = ollama.chat(
-    model='llama3',
+    model='llama3.1',
     messages=[{'role': 'user', 'content': 'Why is the sky blue?'}],
     stream=True,
 )
@@ -45,13 +45,13 @@ The Ollama Python library's API is designed around the [Ollama REST API](https:/
 ### Chat
 
 ```python
-ollama.chat(model='llama3', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
+ollama.chat(model='llama3.1', messages=[{'role': 'user', 'content': 'Why is the sky blue?'}])
 ```
 
 ### Generate
 
 ```python
-ollama.generate(model='llama3', prompt='Why is the sky blue?')
+ollama.generate(model='llama3.1', prompt='Why is the sky blue?')
 ```
 
 ### List
@@ -63,14 +63,14 @@ ollama.list()
 ### Show
 
 ```python
-ollama.show('llama3')
+ollama.show('llama3.1')
 ```
 
 ### Create
 
 ```python
 modelfile='''
-FROM llama3
+FROM llama3.1
 SYSTEM You are mario from super mario bros.
 '''
 
@@ -80,31 +80,31 @@ ollama.create(model='example', modelfile=modelfile)
 ### Copy
 
 ```python
-ollama.copy('llama3', 'user/llama3')
+ollama.copy('llama3.1', 'user/llama3.1')
 ```
 
 ### Delete
 
 ```python
-ollama.delete('llama3')
+ollama.delete('llama3.1')
 ```
 
 ### Pull
 
 ```python
-ollama.pull('llama3')
+ollama.pull('llama3.1')
 ```
 
 ### Push
 
 ```python
-ollama.push('user/llama3')
+ollama.push('user/llama3.1')
 ```
 
 ### Embeddings
 
 ```python
-ollama.embeddings(model='llama3', prompt='The sky is blue because of rayleigh scattering')
+ollama.embeddings(model='llama3.1', prompt='The sky is blue because of rayleigh scattering')
 ```
 
 ### Ps
@@ -123,7 +123,7 @@ A custom client can be created with the following fields:
 ```python
 from ollama import Client
 client = Client(host='http://localhost:11434')
-response = client.chat(model='llama3', messages=[
+response = client.chat(model='llama3.1', messages=[
   {
     'role': 'user',
     'content': 'Why is the sky blue?',
@@ -139,7 +139,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  response = await AsyncClient().chat(model='llama3', messages=[message])
+  response = await AsyncClient().chat(model='llama3.1', messages=[message])
 
 asyncio.run(chat())
 ```
@@ -152,7 +152,7 @@ from ollama import AsyncClient
 
 async def chat():
   message = {'role': 'user', 'content': 'Why is the sky blue?'}
-  async for part in await AsyncClient().chat(model='llama3', messages=[message], stream=True):
+  async for part in await AsyncClient().chat(model='llama3.1', messages=[message], stream=True):
     print(part['message']['content'], end='', flush=True)
 
 asyncio.run(chat())
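
For quick verification of the rename, the updated calls compose end to end. A minimal sketch using only calls that appear in the hunks above (it assumes a local Ollama server on the default port, and pulls `llama3.1` first in case the renamed model is not yet present locally):

```python
import ollama

# Fetch the model the updated README now references, in case it is not local yet
# (ollama.pull is one of the calls changed in the diff above).
ollama.pull('llama3.1')

# Non-streaming chat, mirroring the first hunk; the response is indexed the same
# way the streaming example indexes each part.
response = ollama.chat(model='llama3.1', messages=[
  {'role': 'user', 'content': 'Why is the sky blue?'},
])
print(response['message']['content'])
```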