diff --git a/README.md b/README.md index ccf2e60..7edc416 100644 --- a/README.md +++ b/README.md @@ -4,7 +4,7 @@ Groq Cloud runs LLM models fast and cheap. Llama 3, Mixtrel, Gemma, and more at [![speed-pricing](docs/images/groq-speed-price-20240421.png)](https://wow.groq.com/) -Speed and pricing at 2024-04-21. +Speed and pricing as of 2024-04-21. Also see their [changelog](https://console.groq.com/docs/changelog) for new models and features. ## Groq Cloud API @@ -185,7 +185,7 @@ Assistant reply with model gemma-7b-it: LLMs are increasingly supporting deferring to tools or functions to fetch data, perform calculations, or store structured data. Groq Cloud in turn then supports their tool implementations through its API. -See the [Using Tools](https://console.groq.com/docs/tool-use) documentation for the list of models that currently support tools. +See the [Using Tools](https://console.groq.com/docs/tool-use) documentation for the list of models that currently support tools. Other models might support tool use sometimes and raise errors at other times. 
```ruby @client = Groq::Client.new(model_id: "mixtral-8x7b-32768") diff --git a/lib/groq/client.rb b/lib/groq/client.rb index 6d6ce8b..4c219bc 100644 --- a/lib/groq/client.rb +++ b/lib/groq/client.rb @@ -46,6 +46,13 @@ def chat(messages, model_id: nil, tools: nil) end end + def get(path:) + client.get do |req| + req.url path + req.headers["Authorization"] = "Bearer #{@api_key}" + end + end + def post(path:, body:) client.post do |req| req.url path diff --git a/lib/groq/model.rb b/lib/groq/model.rb index b6a99e7..853ab55 100644 --- a/lib/groq/model.rb +++ b/lib/groq/model.rb @@ -49,5 +49,23 @@ def default_model def default_model_id default_model[:model_id] end + + # https://api.groq.com/openai/v1/models + # Output: + # {"object": "list", + # "data": [ + # { + # "id": "gemma-7b-it", + # "object": "model", + # "created": 1693721698, + # "owned_by": "Google", + # "active": true, + # "context_window": 8192 + # }, + def load_models(client:) + client ||= Groq::Client.new + response = client.get(path: "/openai/v1/models") + response.body + end end end diff --git a/test/fixtures/vcr_cassettes/api/get_models.yml b/test/fixtures/vcr_cassettes/api/get_models.yml new file mode 100644 index 0000000..617fdc1 --- /dev/null +++ b/test/fixtures/vcr_cassettes/api/get_models.yml @@ -0,0 +1,58 @@ +--- +http_interactions: +- request: + method: get + uri: https://api.groq.com/openai/v1/models + body: + encoding: US-ASCII + string: '' + headers: + User-Agent: + - Faraday v2.9.0 + Authorization: + - Bearer + Accept-Encoding: + - gzip;q=1.0,deflate;q=0.6,identity;q=0.3 + Accept: + - "*/*" + response: + status: + code: 200 + message: OK + headers: + Date: + - Sat, 20 Apr 2024 20:56:50 GMT + Content-Type: + - application/json + Transfer-Encoding: + - chunked + Connection: + - keep-alive + Cache-Control: + - private, max-age=0, no-store, no-cache, must-revalidate + Vary: + - Origin, Accept-Encoding + X-Request-Id: + - req_01hvymr9mpf58vvv3r66m1p3y9 + Via: + - 1.1 google + Alt-Svc: + - 
h3=":443"; ma=86400 + Cf-Cache-Status: + - DYNAMIC + Set-Cookie: + - __cf_bm=1_7GmiVeiw0MZ7WgpYSjlt41aefHP9xwUc8hNvwwewE-1713646610-1.0.1.1-qULpGYO__MQhQfX5bCLhqzZ9nNP1GGW7tQRdYuE53yICW9WLOXFaA0swUiwE4hKBXeSU1UAr6M46BAcWjMuKJA; + path=/; expires=Sat, 20-Apr-24 21:26:50 GMT; domain=.groq.com; HttpOnly; Secure; + SameSite=None + Server: + - cloudflare + Cf-Ray: + - 87780b0fb8efa80e-SYD + body: + encoding: ASCII-8BIT + string: '{"object":"list","data":[{"id":"gemma-7b-it","object":"model","created":1693721698,"owned_by":"Google","active":true,"context_window":8192},{"id":"llama2-70b-4096","object":"model","created":1693721698,"owned_by":"Meta","active":true,"context_window":4096},{"id":"llama3-70b-8192","object":"model","created":1693721698,"owned_by":"Meta","active":true,"context_window":8192},{"id":"llama3-8b-8192","object":"model","created":1693721698,"owned_by":"Meta","active":true,"context_window":8192},{"id":"mixtral-8x7b-32768","object":"model","created":1693721698,"owned_by":"Mistral + AI","active":true,"context_window":32768}]} + + ' + recorded_at: Sat, 20 Apr 2024 20:56:49 GMT +recorded_with: VCR 6.2.0 diff --git a/test/groq/test_models.rb b/test/groq/test_models.rb new file mode 100644 index 0000000..9f2cc80 --- /dev/null +++ b/test/groq/test_models.rb @@ -0,0 +1,22 @@ +# frozen_string_literal: true + +require "test_helper" + +class TestGroqModel < Minitest::Test + def test_load_models + VCR.use_cassette("api/get_models") do + client = Groq::Client.new + models = Groq::Model.load_models(client: client) + expected = { + "object" => "list", + "data" => + [{"id" => "gemma-7b-it", "object" => "model", "created" => 1693721698, "owned_by" => "Google", "active" => true, "context_window" => 8192}, + {"id" => "llama2-70b-4096", "object" => "model", "created" => 1693721698, "owned_by" => "Meta", "active" => true, "context_window" => 4096}, + {"id" => "llama3-70b-8192", "object" => "model", "created" => 1693721698, "owned_by" => "Meta", "active" => true, 
"context_window" => 8192}, + {"id" => "llama3-8b-8192", "object" => "model", "created" => 1693721698, "owned_by" => "Meta", "active" => true, "context_window" => 8192}, + {"id" => "mixtral-8x7b-32768", "object" => "model", "created" => 1693721698, "owned_by" => "Mistral AI", "active" => true, "context_window" => 32768}] + } + assert_equal expected, models + end + end +end