
Commit

updating docs
lebrunel committed Feb 19, 2024
1 parent 92cb82b commit 021d67e
Showing 3 changed files with 18 additions and 17 deletions.
README.md (7 changes: 3 additions & 4 deletions)
@@ -100,8 +100,7 @@ defmodule MyApp.ChatLive do
   # When the client invokes the "prompt" event, create a streaming request and
   # asynchronously send messages back to self.
   def handle_event("prompt", %{"message" => prompt}, socket) do
-    client = Ollama.init()
-    {:ok, streamer} = Ollama.completion(client, [
+    {:ok, streamer} = Ollama.completion(Ollama.init(), [
       model: "llama2",
       prompt: prompt,
       stream: true,
@@ -115,7 +114,7 @@ defmodule MyApp.ChatLive do
     }
   end

-  # The streaming request sends messages back to the LiveView process
+  # The streaming request sends messages back to the LiveView process.
   def handle_info({_request_ref, {:data, _data}} = message, socket) do
     ref = socket.assigns.current_request
     case message do
@@ -130,7 +129,7 @@
     end
   end

-  # The streaming request is finished
+  # When the streaming request is finished, remove the current reference.
   def handle_async(:streaming, :ok, socket) do
     {:noreply, assign(socket, current_request: nil)}
   end
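
The `case` body collapsed between the hunks above is where streamed chunks are matched against the current request. A hypothetical sketch of that clause, assuming a `:response` assign for the accumulated text (field names follow Ollama's streaming JSON, where each chunk carries a `"response"` fragment and a final `"done"` flag):

```elixir
# Hypothetical sketch, not the literal collapsed lines. `^ref` pins the
# match to the request stored in socket.assigns.current_request; data from
# any other request falls through to the catch-all clause.
case message do
  {^ref, {:data, %{"done" => false} = data}} ->
    # Mid-stream chunk: append the text fragment to the UI state.
    {:noreply, update(socket, :response, &(&1 <> data["response"]))}

  {^ref, {:data, %{"done" => true} = data}} ->
    # Final chunk: append any trailing content.
    {:noreply, update(socket, :response, &(&1 <> data["response"]))}

  _message ->
    {:noreply, socket}
end
```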
lib/ollama.ex (21 changes: 12 additions & 9 deletions)
@@ -26,11 +26,15 @@ defmodule Ollama do
 ## Quickstart

-> #### API change {: .warning}
+> #### API change {: .info}
 >
-> The `Ollama.API` module has been deprecated in favour of the top level
-`Ollama` module. Apologies for the namespace change. `Ollama.API` will be
-removed in version 1.
+> The last two minor versions have introduced breaking API changes. We'll stop
+doing this at version 1.0.0 - promise 🙏🏻.
+>
+> - `0.5.0` - Streaming requests no longer return a `t:Task.t/0`, they return
+a `t:Ollama.Streaming.t/0` struct. Refer to the [section on Streaming](#module-streaming).
+> - `0.4.0` - The `Ollama.API` module has been deprecated in favour of the top
+level `Ollama` module. `Ollama.API` will be removed in version 1.

 Assuming you have Ollama running on localhost, and that you have installed a
 model, use `completion/2` or `chat/2` to interact with the model.
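
For the non-streaming path mentioned here, a minimal sketch (assuming a default local server and a pulled `llama2` model; the `"response"` key is from Ollama's generate API):

```elixir
# Minimal sketch: a synchronous completion against a local Ollama server.
client = Ollama.init()

{:ok, res} = Ollama.completion(client,
  model: "llama2",
  prompt: "Why is the sky blue?"
)

# Without `stream: true` the result is a plain map of the API response.
IO.puts(res["response"])
```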
@@ -117,8 +121,7 @@ defmodule Ollama do
   # When the client invokes the "prompt" event, create a streaming request and
   # asynchronously send messages back to self.
   def handle_event("prompt", %{"message" => prompt}, socket) do
-    client = Ollama.init()
-    {:ok, streamer} = Ollama.completion(client, [
+    {:ok, streamer} = Ollama.completion(Ollama.init(), [
       model: "llama2",
       prompt: prompt,
       stream: true,
@@ -132,7 +135,7 @@
     }
   end

-  # The streaming request sends messages back to the LiveView process
+  # The streaming request sends messages back to the LiveView process.
   def handle_info({_request_ref, {:data, _data}} = message, socket) do
     ref = socket.assigns.current_request
     case message do
@@ -147,7 +150,7 @@
     end
   end

-  # The streaming request is finished
+  # When the streaming request is finished, remove the current reference.
   def handle_async(:streaming, :ok, socket) do
     {:noreply, assign(socket, current_request: nil)}
   end
@@ -191,7 +194,7 @@ defmodule Ollama do
   @type message() :: map()

   @typedoc "Client response"
-  @type response() :: {:ok, Streaming.t() | map() | boolean()} | {:error, term()}
+  @type response() :: {:ok, map() | boolean() | Streaming.t()} | {:error, term()}

   @typep req_response() :: {:ok, Req.Response.t() | Streaming.t()} | {:error, term()}

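A hedged illustration of consuming this union: the `Streaming` struct appears only when the request was made with `stream: true`, and some endpoints report a bare boolean. `consume/1` and `handle_map/1` are hypothetical helpers:

```elixir
# Sketch: branching on each variant of response/0. `result` is the return
# value of any client function, e.g. Ollama.completion/2 or Ollama.chat/2.
case result do
  # Streaming requests return an Ollama.Streaming struct
  {:ok, %Ollama.Streaming{} = streamer} -> consume(streamer)
  # Regular JSON responses decode to a map
  {:ok, res} when is_map(res) -> handle_map(res)
  # A few endpoints report success as a bare boolean
  {:ok, ok?} when is_boolean(ok?) -> ok?
  {:error, reason} -> {:error, reason}
end
```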
lib/ollama/streaming.ex (7 changes: 3 additions & 4 deletions)
@@ -30,8 +30,7 @@ defmodule Ollama.Streaming do
   # When the client invokes the "prompt" event, create a streaming request and
   # asynchronously send messages back to self.
   def handle_event("prompt", %{"message" => prompt}, socket) do
-    client = Ollama.init()
-    {:ok, streamer} = Ollama.completion(client, [
+    {:ok, streamer} = Ollama.completion(Ollama.init(), [
       model: "llama2",
       prompt: prompt,
       stream: true,
@@ -45,7 +44,7 @@
     }
   end

-  # The streaming request sends messages back to the LiveView process
+  # The streaming request sends messages back to the LiveView process.
   def handle_info({_request_ref, {:data, _data}} = message, socket) do
     ref = socket.assigns.current_request
     case message do
@@ -60,7 +59,7 @@
     end
   end

-  # The streaming request is finished
+  # When the streaming request is finished, remove the current reference.
   def handle_async(:streaming, :ok, socket) do
     {:noreply, assign(socket, current_request: nil)}
   end
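
The `:ok` clause above covers the success shape. Under Phoenix LiveView's `handle_async/3` contract a crashed task arrives as `{:exit, reason}`, so a hypothetical companion clause (not part of this commit) would clear the reference on failure as well:

```elixir
# Sketch: also clear the tracked request if the streaming task crashes.
def handle_async(:streaming, {:exit, _reason}, socket) do
  {:noreply, assign(socket, current_request: nil)}
end
```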
