From 71b335fcf6e12f021292d6372aaa7ec34cca5560 Mon Sep 17 00:00:00 2001
From: Ismail Pelaseyed
Date: Tue, 16 Apr 2024 10:36:01 +0200
Subject: [PATCH 01/30] Upgrade Fern

---
 fern/fern.config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fern/fern.config.json b/fern/fern.config.json
index c726f28bf..5c1f4bc6f 100644
--- a/fern/fern.config.json
+++ b/fern/fern.config.json
@@ -1,4 +1,4 @@
 {
   "organization": "superagent",
-  "version": "0.16.43"
+  "version": "0.21.0"
 }
\ No newline at end of file

From deb73295024135a685702a2b069538964ad7d36b Mon Sep 17 00:00:00 2001
From: Ismail Pelaseyed
Date: Tue, 16 Apr 2024 10:40:43 +0200
Subject: [PATCH 02/30] Update OpenAPI spec

---
 fern/apis/prod/openapi/openapi.yaml | 29 ++++++++++++++++++++++++++++-
 1 file changed, 28 insertions(+), 1 deletion(-)

diff --git a/fern/apis/prod/openapi/openapi.yaml b/fern/apis/prod/openapi/openapi.yaml
index 27aba78d1..5bca52af4 100644
--- a/fern/apis/prod/openapi/openapi.yaml
+++ b/fern/apis/prod/openapi/openapi.yaml
@@ -2,7 +2,7 @@ openapi: 3.0.2
 info:
   title: Superagent
   description: 🥷 Run AI-agents with an API
-  version: 0.2.29
+  version: 0.2.30
 servers:
   - url: https://api.beta.superagent.sh
 paths:
@@ -1507,6 +1507,33 @@ paths:
                 $ref: '#/components/schemas/HTTPValidationError'
       security:
         - HTTPBearer: []
+    delete:
+      tags:
+        - Vector Database
+      summary: Delete
+      description: Delete a Vector Database
+      operationId: delete_api_v1_vector_dbs__vector_db_id__delete
+      parameters:
+        - required: true
+          schema:
+            title: Vector Db Id
+            type: string
+          name: vector_db_id
+          in: path
+      responses:
+        '200':
+          description: Successful Response
+          content:
+            application/json:
+              schema: {}
+        '422':
+          description: Validation Error
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/HTTPValidationError'
+      security:
+        - HTTPBearer: []
     patch:
       tags:
         - Vector Database

From 49f651668243dac386aa9a191150a0cfd3171fa2 Mon Sep 17 00:00:00 2001
From: Ismail Pelaseyed
Date: Wed, 17 Apr 2024 06:43:00 +0200
Subject: [PATCH 03/30] Downgrade Fern

---
 fern/fern.config.json | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/fern/fern.config.json b/fern/fern.config.json
index 5c1f4bc6f..c726f28bf 100644
--- a/fern/fern.config.json
+++ b/fern/fern.config.json
@@ -1,4 +1,4 @@
 {
   "organization": "superagent",
-  "version": "0.21.0"
+  "version": "0.16.43"
 }
\ No newline at end of file

From 6db3666b90caf13503e4fd807cdf83db49c3ddba Mon Sep 17 00:00:00 2001
From: Ali Salimli <67149699+elisalimli@users.noreply.github.com>
Date: Thu, 18 Apr 2024 15:41:11 +0400
Subject: [PATCH 04/30] Gracefully handling parsing errors for structured
 outputs (#966)

* feat: gracefully handling parsing errors for structured outputs

* chore: stringify parsed response
---
 libs/superagent/app/api/agents.py     | 18 ++++++++++++++----
 libs/superagent/app/api/workflows.py  |  9 +++++++--
 libs/superagent/app/workflows/base.py | 17 +++++++++++++----
 3 files changed, 34 insertions(+), 10 deletions(-)

diff --git a/libs/superagent/app/api/agents.py b/libs/superagent/app/api/agents.py
index 6cf6bb4bf..e4fa20476 100644
--- a/libs/superagent/app/api/agents.py
+++ b/libs/superagent/app/api/agents.py
@@ -501,10 +501,14 @@ async def send_message(
                 from langchain.output_parsers.json import SimpleJsonOutputParser

                 parser = SimpleJsonOutputParser()
-                parsed_schema = str(parser.parse(schema_tokens))
+                try:
+                    parsed_res = parser.parse(schema_tokens)
+                except Exception as e:
+                    logger.error(f"Error parsing output: {e}")
+                    parsed_res = {}

                 # stream line by line to prevent streaming large data in one go
-                for line in parsed_schema.split("\n"):
+                for line in json.dumps(parsed_res).split("\n"):
                     async for val in stream_dict_keys(
                         {"event": "message", "data": line}
                     ):
@@ -609,8 +613,14 @@ async def send_message(
     if output_schema:
         from langchain.output_parsers.json import SimpleJsonOutputParser

-        json_parser = SimpleJsonOutputParser()
-        output["output"] = json_parser.parse(text=output["output"])
+        parser = SimpleJsonOutputParser()
+        try:
+            output["output"] = parser.parse(text=output["output"])
+        except Exception as e:
+            logger.error(f"Error parsing output: {e}")
+            output["output"] = {}
+
+        output = json.dumps(output)

     return {"success": True, "data": output}
diff --git a/libs/superagent/app/api/workflows.py b/libs/superagent/app/api/workflows.py
index 270e3a484..e306eefb7 100644
--- a/libs/superagent/app/api/workflows.py
+++ b/libs/superagent/app/api/workflows.py
@@ -304,10 +304,15 @@ async def send_message() -> AsyncIterable[str]:
                 from langchain.output_parsers.json import SimpleJsonOutputParser

                 parser = SimpleJsonOutputParser()
-                parsed_schema = str(parser.parse(schema_tokens))
+                try:
+                    parsed_res = parser.parse(schema_tokens)
+                except Exception as e:
+                    # TODO: stream schema parsing error as well
+                    logger.error(f"Error in parsing schema: {e}")
+                    parsed_res = {}

                 # stream line by line to prevent streaming large data in one go
-                for line in parsed_schema.split("\n"):
+                for line in json.dumps(parsed_res).split("\n"):
                     agent_name = workflow_step["agent_name"]
                     async for val in stream_dict_keys(
                         {
diff --git a/libs/superagent/app/workflows/base.py b/libs/superagent/app/workflows/base.py
index d62f13823..a85d5931b 100644
--- a/libs/superagent/app/workflows/base.py
+++ b/libs/superagent/app/workflows/base.py
@@ -1,3 +1,5 @@
+import json
+import logging
 from typing import Any, List

 from agentops.langchain_callback_handler import (
@@ -9,6 +11,8 @@
 from app.agents.base import AgentBase
 from app.utils.callbacks import CustomAsyncIteratorCallbackHandler

+logger = logging.getLogger(__name__)
+

 class WorkflowBase:
     def __init__(
@@ -59,10 +63,15 @@ async def arun(self, input: Any):
             )
             if output_schema:
                 # TODO: throw error if output is not valid
-                json_parser = SimpleJsonOutputParser()
-                agent_response["output"] = json_parser.parse(
-                    text=agent_response["output"]
-                )
+                parser = SimpleJsonOutputParser()
+                try:
+                    agent_response["output"] = parser.parse(
+                        text=agent_response["output"]
+                    )
+                except Exception as e:
+                    logger.error(f"Error parsing output: {e}")
+                    agent_response["output"] = {}
+                agent_response["output"] = json.dumps(agent_response["output"])

             previous_output = agent_response.get("output")
             steps_output.append(agent_response)

From 9a90e5f3a7f7f4e8bb3c56e64563915f7b813b8a Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 17:02:14 +0400
Subject: [PATCH 05/30] fix: revert breaking change

---
 libs/superagent/app/api/agents.py     | 2 --
 libs/superagent/app/workflows/base.py | 2 --
 2 files changed, 4 deletions(-)

diff --git a/libs/superagent/app/api/agents.py b/libs/superagent/app/api/agents.py
index e4fa20476..3fd9c8b3e 100644
--- a/libs/superagent/app/api/agents.py
+++ b/libs/superagent/app/api/agents.py
@@ -620,8 +620,6 @@ async def send_message(
             logger.error(f"Error parsing output: {e}")
             output["output"] = {}

-        output = json.dumps(output)
-
     return {"success": True, "data": output}
diff --git a/libs/superagent/app/workflows/base.py b/libs/superagent/app/workflows/base.py
index a85d5931b..ac1d9e3f7 100644
--- a/libs/superagent/app/workflows/base.py
+++ b/libs/superagent/app/workflows/base.py
@@ -1,4 +1,3 @@
-import json
 import logging
 from typing import Any, List

@@ -71,7 +70,6 @@ async def arun(self, input: Any):
                 except Exception as e:
                     logger.error(f"Error parsing output: {e}")
                     agent_response["output"] = {}
-                agent_response["output"] = json.dumps(agent_response["output"])

             previous_output = agent_response.get("output")
             steps_output.append(agent_response)

From ed8c502f1ad2676a1001dc6f78d85e0ffb12d74a Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:17:07 +0400
Subject: [PATCH 06/30] add 'MISTRAL' to LLMProvider enum

---
 .../prisma/migrations/20240418181431_add_mistral/migration.sql | 2 ++
 libs/superagent/prisma/schema.prisma                           | 1 +
 2 files changed, 3 insertions(+)
 create mode 100644 libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql

diff --git a/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql b/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql
new file mode 100644
index 000000000..fa30cbda9
--- /dev/null
+++ b/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql
@@ -0,0 +1,2 @@
+-- AlterEnum
+ALTER TYPE "LLMProvider" ADD VALUE 'MISTRAL';
diff --git a/libs/superagent/prisma/schema.prisma b/libs/superagent/prisma/schema.prisma
index 6f03bca21..9c9045e81 100644
--- a/libs/superagent/prisma/schema.prisma
+++ b/libs/superagent/prisma/schema.prisma
@@ -24,6 +24,7 @@ enum LLMProvider {
   TOGETHER_AI
   ANTHROPIC
   BEDROCK
+  MISTRAL
 }

 enum LLMModel {

From 14178963e6829f15dc39ee9318e2a1c9dc121b0d Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:17:24 +0400
Subject: [PATCH 07/30] add Mistral to SAML

---
 libs/superagent/app/api/workflow_configs/saml_schema.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index 837da1242..e1e37f6d1 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -149,6 +149,7 @@ class LLMAgentTool(BaseAgentToolModel, LLMAgent):
     LLMProvider.TOGETHER_AI.value,
     LLMProvider.ANTHROPIC.value,
     LLMProvider.BEDROCK.value,
+    LLMProvider.MISTRAL.value,
 ]


@@ -159,6 +160,7 @@ class Workflow(BaseModel):
     perplexity: Optional[LLMAgent]
     together_ai: Optional[LLMAgent]
     bedrock: Optional[LLMAgent]
+    mistral: Optional[LLMAgent]
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
         description="Deprecated! Use LLM providers instead. e.g. `perplexity` or `together_ai`"

From ff859203f2a070eda6aff93038719793146ef5c9 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:17:45 +0400
Subject: [PATCH 08/30] feat(ui): add Mistral integration support

---
 libs/ui/app/integrations/llm.tsx |  8 ++++++++
 libs/ui/config/site.ts           | 13 +++++++++++++
 libs/ui/models/models.ts         |  1 +
 3 files changed, 22 insertions(+)

diff --git a/libs/ui/app/integrations/llm.tsx b/libs/ui/app/integrations/llm.tsx
index 8f2bf5ba3..670821b99 100644
--- a/libs/ui/app/integrations/llm.tsx
+++ b/libs/ui/app/integrations/llm.tsx
@@ -54,6 +54,13 @@ const antrophicSchema = z.object({
   apiKey: z.string().nonempty("API key is required"),
   options: z.object({}),
 })
+
+const mistralSchema = z.object({
+  llmType: z.literal(LLMProvider.MISTRAL),
+  apiKey: z.string().nonempty("API key is required"),
+  options: z.object({}),
+})
+
 const amazonBedrockSchema = z.object({
   llmType: z.literal(LLMProvider.BEDROCK),
   apiKey: z.literal(""),
@@ -79,6 +86,7 @@ const formSchema = z.discriminatedUnion("llmType", [
   perplexityAiSchema,
   togetherAiSchema,
   antrophicSchema,
+  mistralSchema,
   amazonBedrockSchema,
   azureOpenAiSchema,
 ])
diff --git a/libs/ui/config/site.ts b/libs/ui/config/site.ts
index 3d2088966..1f7790487 100644
--- a/libs/ui/config/site.ts
+++ b/libs/ui/config/site.ts
@@ -523,6 +523,19 @@ export const siteConfig = {
       },
     ],
   },
+  {
+    disabled: false,
+    formDescription: "Please enter your Mistral API key.",
+    provider: LLMProvider.MISTRAL,
+    name: "Mistral",
+    metadata: [
+      {
+        key: "apiKey",
+        type: "input",
+        label: "Mistral API Key",
+      },
+    ],
+  },
   {
     disabled: false,
     formDescription: "Please enter your Azure OpenAI API key.",
diff --git a/libs/ui/models/models.ts b/libs/ui/models/models.ts
index 414c0758a..34855caa4 100644
--- a/libs/ui/models/models.ts
+++ b/libs/ui/models/models.ts
@@ -4,6 +4,7 @@ export const LLMProvider = {
   TOGETHER_AI: "TOGETHER_AI",
   ANTHROPIC: "ANTHROPIC",
   BEDROCK: "BEDROCK",
+  MISTRAL: "MISTRAL",
   AZURE_OPENAI: "AZURE_OPENAI",
 } as const

From bc183b584062f39b69de3fa3846fe3994f9a8428 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:36:55 +0400
Subject: [PATCH 09/30] feat(db): add database schemas

---
 .../prisma/migrations/20240418183001_add_groq/migration.sql | 2 ++
 libs/superagent/prisma/schema.prisma                        | 1 +
 2 files changed, 3 insertions(+)
 create mode 100644 libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql

diff --git a/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql b/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql
new file mode 100644
index 000000000..62f59f9fc
--- /dev/null
+++ b/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql
@@ -0,0 +1,2 @@
+-- AlterEnum
+ALTER TYPE "LLMProvider" ADD VALUE 'GROQ';
diff --git a/libs/superagent/prisma/schema.prisma b/libs/superagent/prisma/schema.prisma
index 6f03bca21..93354b433 100644
--- a/libs/superagent/prisma/schema.prisma
+++ b/libs/superagent/prisma/schema.prisma
@@ -24,6 +24,7 @@ enum LLMProvider {
   TOGETHER_AI
   ANTHROPIC
   BEDROCK
+  GROQ
 }

 enum LLMModel {

From d4337d86c496fd15efce4b5d5a2ac308d9294455 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:37:12 +0400
Subject: [PATCH 10/30] feat: add groq to SAML

---
 libs/superagent/app/api/workflow_configs/saml_schema.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index 837da1242..a47f95fa1 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -149,6 +149,7 @@ class LLMAgentTool(BaseAgentToolModel, LLMAgent):
     LLMProvider.TOGETHER_AI.value,
     LLMProvider.ANTHROPIC.value,
     LLMProvider.BEDROCK.value,
+    LLMProvider.GROQ.value,
 ]


@@ -159,6 +160,7 @@ class Workflow(BaseModel):
     perplexity: Optional[LLMAgent]
     together_ai: Optional[LLMAgent]
     bedrock: Optional[LLMAgent]
+    groq: Optional[LLMAgent]
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
         description="Deprecated! Use LLM providers instead. e.g. `perplexity` or `together_ai`"

From 114fbd554fe05614316e70899d1cc7f746648066 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Thu, 18 Apr 2024 22:37:30 +0400
Subject: [PATCH 11/30] feat(ui): groq integration

---
 libs/ui/app/integrations/llm.tsx |  8 ++++++++
 libs/ui/config/site.ts           | 13 +++++++++++++
 libs/ui/models/models.ts         |  1 +
 3 files changed, 22 insertions(+)

diff --git a/libs/ui/app/integrations/llm.tsx b/libs/ui/app/integrations/llm.tsx
index 8f2bf5ba3..636550c96 100644
--- a/libs/ui/app/integrations/llm.tsx
+++ b/libs/ui/app/integrations/llm.tsx
@@ -54,6 +54,13 @@ const antrophicSchema = z.object({
   apiKey: z.string().nonempty("API key is required"),
   options: z.object({}),
 })
+
+const groqSchema = z.object({
+  llmType: z.literal(LLMProvider.GROQ),
+  apiKey: z.string().nonempty("API key is required"),
+  options: z.object({}),
+})
+
 const amazonBedrockSchema = z.object({
   llmType: z.literal(LLMProvider.BEDROCK),
   apiKey: z.literal(""),
@@ -79,6 +86,7 @@ const formSchema = z.discriminatedUnion("llmType", [
   perplexityAiSchema,
   togetherAiSchema,
   antrophicSchema,
+  groqSchema,
   amazonBedrockSchema,
   azureOpenAiSchema,
 ])
diff --git a/libs/ui/config/site.ts b/libs/ui/config/site.ts
index 3d2088966..139c22599 100644
--- a/libs/ui/config/site.ts
+++ b/libs/ui/config/site.ts
@@ -500,6 +500,19 @@ export const siteConfig = {
       },
     ],
   },
+  {
+    disabled: false,
+    formDescription: "Please enter your Groq API key.",
+    provider: LLMProvider.GROQ,
+    name: "Groq",
+    metadata: [
+      {
+        key: "apiKey",
+        type: "input",
+        label: "Groq API Key",
+      },
+    ],
+  },
   {
     disabled: false,
     formDescription: "Please enter your AWS credentials.",
diff --git a/libs/ui/models/models.ts b/libs/ui/models/models.ts
index 414c0758a..0b783e1b6 100644
--- a/libs/ui/models/models.ts
+++ b/libs/ui/models/models.ts
@@ -4,6 +4,7 @@ export const LLMProvider = {
   TOGETHER_AI: "TOGETHER_AI",
   ANTHROPIC: "ANTHROPIC",
   BEDROCK: "BEDROCK",
+  GROQ: "GROQ",
   AZURE_OPENAI: "AZURE_OPENAI",
 } as const

From 0db604e28f9291c77d50638935e38d09978bf212 Mon Sep 17 00:00:00 2001
From: Stefano Lottini
Date: Fri, 19 Apr 2024 13:18:48 +0200
Subject: [PATCH 12/30] (Astra DB) add caller name as User-Agent header in
 HTTP requests to Astra DB API (#968)

---
 libs/superagent/app/vectorstores/astra_client.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/libs/superagent/app/vectorstores/astra_client.py b/libs/superagent/app/vectorstores/astra_client.py
index 61d2763ad..a060e0fbc 100644
--- a/libs/superagent/app/vectorstores/astra_client.py
+++ b/libs/superagent/app/vectorstores/astra_client.py
@@ -45,6 +45,7 @@ def __init__(
         self.request_header = {
             "x-cassandra-token": self.astra_application_token,
             "Content-Type": "application/json",
+            "User-Agent": "superagent",
         }
         self.create_url = f"https://{self.astra_id}-{self.astra_region}.apps.astra.datastax.com/api/json/v1/{self.keyspace_name}"

From e158619522715764cef178c0b2b2a0941afcad4c Mon Sep 17 00:00:00 2001
From: Ali Salimli <67149699+elisalimli@users.noreply.github.com>
Date: Fri, 19 Apr 2024 15:33:22 +0400
Subject: [PATCH 13/30] Mistral Integration (#969)

* add 'MISTRAL' to LLMProvider enum

* add Mistral to SAML

* feat(ui): add Mistral integration support
---
 libs/superagent/app/api/workflow_configs/saml_schema.py        |  2 ++
 .../prisma/migrations/20240418181431_add_mistral/migration.sql |  2 ++
 libs/superagent/prisma/schema.prisma                           |  1 +
 libs/ui/app/integrations/llm.tsx                               |  8 ++++++++
 libs/ui/config/site.ts                                         | 13 +++++++++++++
 libs/ui/models/models.ts                                       |  1 +
 6 files changed, 27 insertions(+)
 create mode 100644 libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index 837da1242..e1e37f6d1 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -149,6 +149,7 @@ class LLMAgentTool(BaseAgentToolModel, LLMAgent):
     LLMProvider.TOGETHER_AI.value,
     LLMProvider.ANTHROPIC.value,
     LLMProvider.BEDROCK.value,
+    LLMProvider.MISTRAL.value,
 ]


@@ -159,6 +160,7 @@ class Workflow(BaseModel):
     perplexity: Optional[LLMAgent]
     together_ai: Optional[LLMAgent]
     bedrock: Optional[LLMAgent]
+    mistral: Optional[LLMAgent]
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
         description="Deprecated! Use LLM providers instead. e.g. `perplexity` or `together_ai`"
diff --git a/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql b/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql
new file mode 100644
index 000000000..fa30cbda9
--- /dev/null
+++ b/libs/superagent/prisma/migrations/20240418181431_add_mistral/migration.sql
@@ -0,0 +1,2 @@
+-- AlterEnum
+ALTER TYPE "LLMProvider" ADD VALUE 'MISTRAL';
diff --git a/libs/superagent/prisma/schema.prisma b/libs/superagent/prisma/schema.prisma
index 6f03bca21..9c9045e81 100644
--- a/libs/superagent/prisma/schema.prisma
+++ b/libs/superagent/prisma/schema.prisma
@@ -24,6 +24,7 @@ enum LLMProvider {
   TOGETHER_AI
   ANTHROPIC
   BEDROCK
+  MISTRAL
 }

 enum LLMModel {
diff --git a/libs/ui/app/integrations/llm.tsx b/libs/ui/app/integrations/llm.tsx
index 8f2bf5ba3..670821b99 100644
--- a/libs/ui/app/integrations/llm.tsx
+++ b/libs/ui/app/integrations/llm.tsx
@@ -54,6 +54,13 @@ const antrophicSchema = z.object({
   apiKey: z.string().nonempty("API key is required"),
   options: z.object({}),
 })
+
+const mistralSchema = z.object({
+  llmType: z.literal(LLMProvider.MISTRAL),
+  apiKey: z.string().nonempty("API key is required"),
+  options: z.object({}),
+})
+
 const amazonBedrockSchema = z.object({
   llmType: z.literal(LLMProvider.BEDROCK),
   apiKey: z.literal(""),
@@ -79,6 +86,7 @@ const formSchema = z.discriminatedUnion("llmType", [
   perplexityAiSchema,
   togetherAiSchema,
   antrophicSchema,
+  mistralSchema,
   amazonBedrockSchema,
   azureOpenAiSchema,
 ])
diff --git a/libs/ui/config/site.ts b/libs/ui/config/site.ts
index 3d2088966..1f7790487 100644
--- a/libs/ui/config/site.ts
+++ b/libs/ui/config/site.ts
@@ -523,6 +523,19 @@ export const siteConfig = {
       },
     ],
   },
+  {
+    disabled: false,
+    formDescription: "Please enter your Mistral API key.",
+    provider: LLMProvider.MISTRAL,
+    name: "Mistral",
+    metadata: [
+      {
+        key: "apiKey",
+        type: "input",
+        label: "Mistral API Key",
+      },
+    ],
+  },
   {
     disabled: false,
     formDescription: "Please enter your Azure OpenAI API key.",
diff --git a/libs/ui/models/models.ts b/libs/ui/models/models.ts
index 414c0758a..34855caa4 100644
--- a/libs/ui/models/models.ts
+++ b/libs/ui/models/models.ts
@@ -4,6 +4,7 @@ export const LLMProvider = {
   TOGETHER_AI: "TOGETHER_AI",
   ANTHROPIC: "ANTHROPIC",
   BEDROCK: "BEDROCK",
+  MISTRAL: "MISTRAL",
   AZURE_OPENAI: "AZURE_OPENAI",
 } as const

From e27d0ce7dc468bb66223b4c3bd40fca56be09a54 Mon Sep 17 00:00:00 2001
From: Ali Salimli <67149699+elisalimli@users.noreply.github.com>
Date: Fri, 19 Apr 2024 16:12:15 +0400
Subject: [PATCH 14/30] =?UTF-8?q?=E2=9A=A1=EF=B8=8FGroq=20Integration=20(#?=
 =?UTF-8?q?970)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* feat(db): add database schemas

* feat: add groq to SAML

* feat(ui): groq integration
---
 libs/superagent/app/api/workflow_configs/saml_schema.py    |  2 ++
 libs/superagent/app/vectorstores/astra_client.py           |  1 -
 .../prisma/migrations/20240418183001_add_groq/migration.sql |  2 ++
 libs/superagent/prisma/schema.prisma                       |  1 +
 libs/ui/app/integrations/llm.tsx                           |  7 +++++++
 libs/ui/config/site.ts                                     | 13 +++++++++++++
 libs/ui/models/models.ts                                   |  1 +
 7 files changed, 26 insertions(+), 1 deletion(-)
 create mode 100644 libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index e1e37f6d1..022f74315 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -149,6 +149,7 @@ class LLMAgentTool(BaseAgentToolModel, LLMAgent):
     LLMProvider.TOGETHER_AI.value,
     LLMProvider.ANTHROPIC.value,
     LLMProvider.BEDROCK.value,
+    LLMProvider.GROQ.value,
     LLMProvider.MISTRAL.value,
 ]


@@ -160,6 +161,7 @@ class Workflow(BaseModel):
     perplexity: Optional[LLMAgent]
     together_ai: Optional[LLMAgent]
     bedrock: Optional[LLMAgent]
+    groq: Optional[LLMAgent]
     mistral: Optional[LLMAgent]
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
diff --git a/libs/superagent/app/vectorstores/astra_client.py b/libs/superagent/app/vectorstores/astra_client.py
index a060e0fbc..61d2763ad 100644
--- a/libs/superagent/app/vectorstores/astra_client.py
+++ b/libs/superagent/app/vectorstores/astra_client.py
@@ -45,7 +45,6 @@ def __init__(
         self.request_header = {
             "x-cassandra-token": self.astra_application_token,
             "Content-Type": "application/json",
-            "User-Agent": "superagent",
         }
         self.create_url = f"https://{self.astra_id}-{self.astra_region}.apps.astra.datastax.com/api/json/v1/{self.keyspace_name}"
diff --git a/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql b/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql
new file mode 100644
index 000000000..62f59f9fc
--- /dev/null
+++ b/libs/superagent/prisma/migrations/20240418183001_add_groq/migration.sql
@@ -0,0 +1,2 @@
+-- AlterEnum
+ALTER TYPE "LLMProvider" ADD VALUE 'GROQ';
diff --git a/libs/superagent/prisma/schema.prisma b/libs/superagent/prisma/schema.prisma
index 9c9045e81..7ed8e81cd 100644
--- a/libs/superagent/prisma/schema.prisma
+++ b/libs/superagent/prisma/schema.prisma
@@ -24,6 +24,7 @@ enum LLMProvider {
   TOGETHER_AI
   ANTHROPIC
   BEDROCK
+  GROQ
   MISTRAL
 }
diff --git a/libs/ui/app/integrations/llm.tsx b/libs/ui/app/integrations/llm.tsx
index 670821b99..345a248f8 100644
--- a/libs/ui/app/integrations/llm.tsx
+++ b/libs/ui/app/integrations/llm.tsx
@@ -55,6 +55,12 @@ const antrophicSchema = z.object({
   options: z.object({}),
 })

+const groqSchema = z.object({
+  llmType: z.literal(LLMProvider.GROQ),
+  apiKey: z.string().nonempty("API key is required"),
+  options: z.object({}),
+})
+
 const mistralSchema = z.object({
   llmType: z.literal(LLMProvider.MISTRAL),
   apiKey: z.string().nonempty("API key is required"),
@@ -86,6 +92,7 @@ const formSchema = z.discriminatedUnion("llmType", [
   perplexityAiSchema,
   togetherAiSchema,
   antrophicSchema,
+  groqSchema,
   mistralSchema,
   amazonBedrockSchema,
   azureOpenAiSchema,
diff --git a/libs/ui/config/site.ts b/libs/ui/config/site.ts
index 1f7790487..5ae16b424 100644
--- a/libs/ui/config/site.ts
+++ b/libs/ui/config/site.ts
@@ -500,6 +500,19 @@ export const siteConfig = {
       },
     ],
   },
+  {
+    disabled: false,
+    formDescription: "Please enter your Groq API key.",
+    provider: LLMProvider.GROQ,
+    name: "Groq",
+    metadata: [
+      {
+        key: "apiKey",
+        type: "input",
+        label: "Groq API Key",
+      },
+    ],
+  },
   {
     disabled: false,
     formDescription: "Please enter your AWS credentials.",
diff --git a/libs/ui/models/models.ts b/libs/ui/models/models.ts
index 34855caa4..fa39a8d86 100644
--- a/libs/ui/models/models.ts
+++ b/libs/ui/models/models.ts
@@ -4,6 +4,7 @@ export const LLMProvider = {
   TOGETHER_AI: "TOGETHER_AI",
   ANTHROPIC: "ANTHROPIC",
   BEDROCK: "BEDROCK",
+  GROQ: "GROQ",
   MISTRAL: "MISTRAL",
   AZURE_OPENAI: "AZURE_OPENAI",
 } as const

From cc5bb2f2d3e3a0837487bc4e7b024717968240e5 Mon Sep 17 00:00:00 2001
From: Ismail Pelaseyed
Date: Fri, 19 Apr 2024 14:13:48 +0200
Subject: [PATCH 15/30] Update API version

---
 libs/superagent/app/main.py    | 2 +-
 libs/superagent/pyproject.toml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/superagent/app/main.py b/libs/superagent/app/main.py
index bd5bfd13c..0491ecfb5 100644
--- a/libs/superagent/app/main.py
+++ b/libs/superagent/app/main.py
@@ -36,7 +36,7 @@
     title="Superagent",
     docs_url="/",
     description="🥷 Run AI-agents with an API",
-    version="0.2.29",
+    version="0.2.32",
     servers=[{"url": config("SUPERAGENT_API_URL")}],
 )
diff --git a/libs/superagent/pyproject.toml b/libs/superagent/pyproject.toml
index 63c75df77..4f468981c 100644
--- a/libs/superagent/pyproject.toml
+++ b/libs/superagent/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "superagent"
-version = "0.2.29"
+version = "0.2.32"
 description = "🥷 Run AI-agents with an API"
 authors = ["Ismail Pelaseyed"]
 readme = "../../README.md"

From c30bc1a1a2e1214984e88066047bb09ecdf944c8 Mon Sep 17 00:00:00 2001
From: Ismail Pelaseyed
Date: Fri, 19 Apr 2024 14:21:39 +0200
Subject: [PATCH 16/30] Update OpenAPI specs

---
 fern/apis/prod/openapi/openapi.yaml | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/fern/apis/prod/openapi/openapi.yaml b/fern/apis/prod/openapi/openapi.yaml
index 5bca52af4..3b8cdd66a 100644
--- a/fern/apis/prod/openapi/openapi.yaml
+++ b/fern/apis/prod/openapi/openapi.yaml
@@ -2,7 +2,7 @@ openapi: 3.0.2
 info:
   title: Superagent
   description: 🥷 Run AI-agents with an API
-  version: 0.2.30
+  version: 0.2.32
 servers:
   - url: https://api.beta.superagent.sh
 paths:
@@ -195,8 +195,8 @@ paths:
                 $ref: '#/components/schemas/HTTPValidationError'
       security:
         - HTTPBearer: []
-      x-fern-sdk-group-name: agent
       x-fern-sdk-method-name: invoke
+      x-fern-sdk-group-name: agent
   /api/v1/agents/{agent_id}/llms:
     post:
      tags:
@@ -1810,6 +1810,8 @@ components:
         - TOGETHER_AI
         - ANTHROPIC
         - BEDROCK
+        - GROQ
+        - MISTRAL
       type: string
       description: An enumeration.
     OpenAiAssistantParameters:

From a60cb8abc1710ea4627ff98d8af63a93699f4c6a Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Sat, 20 Apr 2024 01:12:45 +0400
Subject: [PATCH 17/30] fix: add JSON serialization to SuperRagTool query
 response

---
 libs/superagent/app/tools/superrag.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/libs/superagent/app/tools/superrag.py b/libs/superagent/app/tools/superrag.py
index d312b3560..f1dfdcec1 100644
--- a/libs/superagent/app/tools/superrag.py
+++ b/libs/superagent/app/tools/superrag.py
@@ -1,3 +1,4 @@
+import json
 import logging

 from langchain_community.tools import BaseTool
@@ -57,7 +58,7 @@ async def _arun(
         credentials = get_superrag_compatible_credentials(provider.options)

-        return self.superrag_service.query(
+        res = self.superrag_service.query(
             {
                 "vector_database": {"type": database_provider, "config": credentials},
                 "index_name": index_name,
@@ -67,3 +68,4 @@ async def _arun(
                 "interpreter_mode": interpreter_mode,
             }
         )
+        return json.dumps(res)

From 97e0958fcc6a0d6c860015637be856b66bace2d7 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Sat, 20 Apr 2024 12:44:40 +0400
Subject: [PATCH 18/30] feat(db): add cohere migration

---
 .../prisma/migrations/20240420075553_add_cohere/migration.sql | 2 ++
 libs/superagent/prisma/schema.prisma                          | 1 +
 2 files changed, 3 insertions(+)
 create mode 100644 libs/superagent/prisma/migrations/20240420075553_add_cohere/migration.sql

diff --git a/libs/superagent/prisma/migrations/20240420075553_add_cohere/migration.sql b/libs/superagent/prisma/migrations/20240420075553_add_cohere/migration.sql
new file mode 100644
index 000000000..3cb444fb4
--- /dev/null
+++ b/libs/superagent/prisma/migrations/20240420075553_add_cohere/migration.sql
@@ -0,0 +1,2 @@
+-- AlterEnum
+ALTER TYPE "LLMProvider" ADD VALUE 'COHERE_CHAT';
\ No newline at end of file
diff --git a/libs/superagent/prisma/schema.prisma b/libs/superagent/prisma/schema.prisma
index 7ed8e81cd..c3125752d 100644
--- a/libs/superagent/prisma/schema.prisma
+++ b/libs/superagent/prisma/schema.prisma
@@ -26,6 +26,7 @@ enum LLMProvider {
   BEDROCK
   GROQ
   MISTRAL
+  COHERE_CHAT
 }

 enum LLMModel {

From 28f9451a94f2e9c6ae3b30d44a11f49b4c51da79 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Sat, 20 Apr 2024 12:45:05 +0400
Subject: [PATCH 19/30] feat(saml): add cohere to SAML

---
 libs/superagent/app/api/workflow_configs/saml_schema.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index 022f74315..01253e758 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -151,6 +151,7 @@ class LLMAgentTool(BaseAgentToolModel, LLMAgent):
     LLMProvider.BEDROCK.value,
     LLMProvider.GROQ.value,
     LLMProvider.MISTRAL.value,
+    LLMProvider.COHERE_CHAT.value,
 ]


@@ -163,6 +164,7 @@ class Workflow(BaseModel):
     bedrock: Optional[LLMAgent]
     groq: Optional[LLMAgent]
     mistral: Optional[LLMAgent]
+    cohere_chat: Optional[LLMAgent] = Field(alias="cohere")
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
         description="Deprecated! Use LLM providers instead. e.g. `perplexity` or `together_ai`"

From c357e75b1cda8ea17ea3930b5fdcc6139b18b0f3 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Sat, 20 Apr 2024 12:45:44 +0400
Subject: [PATCH 20/30] feat(ui): add cohere integration

---
 libs/ui/app/integrations/llm.tsx |  7 +++++++
 libs/ui/config/site.ts           | 13 +++++++++++++
 libs/ui/models/models.ts         |  1 +
 3 files changed, 21 insertions(+)

diff --git a/libs/ui/app/integrations/llm.tsx b/libs/ui/app/integrations/llm.tsx
index 345a248f8..c375763cb 100644
--- a/libs/ui/app/integrations/llm.tsx
+++ b/libs/ui/app/integrations/llm.tsx
@@ -67,6 +67,12 @@ const mistralSchema = z.object({
   options: z.object({}),
 })

+const cohereSchema = z.object({
+  llmType: z.literal(LLMProvider.COHERE_CHAT),
+  apiKey: z.string().nonempty("API key is required"),
+  options: z.object({}),
+})
+
 const amazonBedrockSchema = z.object({
   llmType: z.literal(LLMProvider.BEDROCK),
   apiKey: z.literal(""),
@@ -94,6 +100,7 @@ const formSchema = z.discriminatedUnion("llmType", [
   antrophicSchema,
   groqSchema,
   mistralSchema,
+  cohereSchema,
   amazonBedrockSchema,
   azureOpenAiSchema,
 ])
diff --git a/libs/ui/config/site.ts b/libs/ui/config/site.ts
index 5ae16b424..f65099579 100644
--- a/libs/ui/config/site.ts
+++ b/libs/ui/config/site.ts
@@ -549,6 +549,19 @@ export const siteConfig = {
       },
     ],
   },
+  {
+    disabled: false,
+    formDescription: "Please enter your Cohere API key.",
+    provider: LLMProvider.COHERE_CHAT,
+    name: "Cohere",
+    metadata: [
+      {
+        key: "apiKey",
+        type: "input",
+        label: "Cohere API Key",
+      },
+    ],
+  },
   {
     disabled: false,
     formDescription: "Please enter your Azure OpenAI API key.",
diff --git a/libs/ui/models/models.ts b/libs/ui/models/models.ts
index fa39a8d86..7e0cfc67f 100644
--- a/libs/ui/models/models.ts
+++ b/libs/ui/models/models.ts
@@ -6,6 +6,7 @@ export const LLMProvider = {
   BEDROCK: "BEDROCK",
   GROQ: "GROQ",
   MISTRAL: "MISTRAL",
+  COHERE_CHAT: "COHERE_CHAT",
   AZURE_OPENAI: "AZURE_OPENAI",
 } as const

From 183d6daf6f0351ead0652c9d6940d5d9fae42af7 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Sat, 20 Apr 2024 12:46:51 +0400
Subject: [PATCH 21/30] feat(ui): add cohere integration

---
 libs/ui/app/integrations/client-page.tsx | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/libs/ui/app/integrations/client-page.tsx b/libs/ui/app/integrations/client-page.tsx
index bd6e3c976..e37568a7f 100644
--- a/libs/ui/app/integrations/client-page.tsx
+++ b/libs/ui/app/integrations/client-page.tsx
@@ -15,7 +15,7 @@ export default function IntegrationsClientPage({
   configuredLLMs: any
 }) {
   return (
-
+
           STORAGE

From f99d8357bb055ee0cae121039d464ec151712cb2 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Mon, 22 Apr 2024 22:10:30 +0400
Subject: [PATCH 22/30] fix: indentation issue in
 CustomAsyncIteratorCallbackHandler

---
 libs/superagent/app/utils/callbacks.py | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/libs/superagent/app/utils/callbacks.py b/libs/superagent/app/utils/callbacks.py
index 02171ac24..21bc63770 100644
--- a/libs/superagent/app/utils/callbacks.py
+++ b/libs/superagent/app/utils/callbacks.py
@@ -42,7 +42,7 @@ async def on_agent_finish(self, finish: AgentFinish, **_: Any) -> Any:
         while not self.queue.empty():
             await asyncio.sleep(0.1)

-            self.done.set()
+        self.done.set()

     async def on_llm_start(self, *_: Any, **__: Any) -> None:
         # If two calls are made in a row, this resets the state
@@ -93,6 +93,7 @@ async def aiter(self) -> AsyncIterator[str]:
             if token_or_done is True:
                 continue
             self.is_stream_started = True
+
             yield token_or_done
From 95206acb42dafe928ee3455dcceb218d8481157b Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Mon, 22 Apr 2024 22:15:09 +0400
Subject: [PATCH 23/30] refactor: remove hyphen from function name regex

---
 libs/superagent/app/tools/__init__.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/libs/superagent/app/tools/__init__.py b/libs/superagent/app/tools/__init__.py
index 6c6fcebdc..5fa5a73cb 100644
--- a/libs/superagent/app/tools/__init__.py
+++ b/libs/superagent/app/tools/__init__.py
@@ -150,14 +150,14 @@ def conform_function_name(url):
     """
     Validates OpenAI function names and modifies them to conform to the regex
     """
-    regex_pattern = r"^[a-zA-Z0-9_-]{1,64}$"
+    regex_pattern = r"^[A-Za-z0-9_]{1,64}$"

     # Check if the URL matches the regex
     if re.match(regex_pattern, url):
         return url  # URL is already valid
     else:
         # Modify the URL to conform to the regex
-        valid_url = re.sub(r"[^a-zA-Z0-9_-]", "", url)[:64]
+        valid_url = re.sub(r"[^A-Za-z0-9_]", "", url)[:64]
         return valid_url

From 13d26c915f1a70a611b63da541978f6c0fbee4d9 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Mon, 22 Apr 2024 22:15:44 +0400
Subject: [PATCH 24/30] feat: add native function calling

---
 libs/superagent/app/agents/base.py      |  52 ++++-
 libs/superagent/app/agents/langchain.py |   2 +-
 libs/superagent/app/agents/llm.py       | 244 +++++++++++++++++++++---
 3 files changed, 260 insertions(+), 38 deletions(-)

diff --git a/libs/superagent/app/agents/base.py b/libs/superagent/app/agents/base.py
index e00786ef0..305a9cb8a 100644
--- a/libs/superagent/app/agents/base.py
+++ b/libs/superagent/app/agents/base.py
@@ -6,7 +6,7 @@
 from app.models.request import LLMParams as LLMParamsRequest
 from app.utils.callbacks import CustomAsyncIteratorCallbackHandler

-from prisma.enums import AgentType
+from prisma.enums import AgentType, LLMProvider
 from prisma.models import LLM, Agent
@@ -21,9 +21,21 @@ class LLMParams(BaseModel):
 class LLMData(BaseModel):
     llm: LLM
     params: LLMParams
+    model: str


 class AgentBase(ABC):
+    _input: str
+    _messages: list = []
+    prompt: Any
+    tools: Any
+    session_id: str
+    enable_streaming: bool
+    output_schema: str
+    callbacks: List[CustomAsyncIteratorCallbackHandler]
+    agent_data: Agent
+    llm_data: LLMData
+
     def __init__(
         self,
         session_id: str,
@@ -40,10 +52,6 @@ def __init__(
         self.llm_data = llm_data
         self.agent_data = agent_data

-    _input: str
-    prompt: Any
-    tools: Any
-
     @property
     def input(self):
         return self._input
@@ -52,6 +60,14 @@ def input(self, value: str):
     def input(self, value: str):
         self._input = value

+    @property
+    def messages(self):
+        return self._messages
+
+    @messages.setter
+    def messages(self, value: list):
+        self._messages = value
+
     @property
     @abstractmethod
     def prompt(self) -> Any:
@@ -95,7 +111,31 @@ def llm_data(self):
             **(params),
         }

-        return LLMData(llm=llm, params=LLMParams.parse_obj(options))
+        params = LLMParams(
+            temperature=options.get("temperature"),
+            max_tokens=options.get("max_tokens"),
+            aws_access_key_id=(
+                options.get("aws_access_key_id")
+                if llm.provider == LLMProvider.BEDROCK
+                else None
+            ),
+            aws_secret_access_key=(
+                options.get("aws_secret_access_key")
+                if llm.provider == LLMProvider.BEDROCK
+                else None
+            ),
+            aws_region_name=(
+                options.get("aws_region_name")
+                if llm.provider == LLMProvider.BEDROCK
+                else None
+            ),
+        )
+
+        return LLMData(
+            llm=llm,
+            params=LLMParams.parse_obj(options),
+            model=self.agent_data.llmModel or self.agent_data.metadata.get("model"),
+        )

     async def get_agent(self):
         if self.agent_data.type == AgentType.OPENAI_ASSISTANT:
diff --git a/libs/superagent/app/agents/langchain.py b/libs/superagent/app/agents/langchain.py
index 73754b58c..c5fc39e95 100644
--- a/libs/superagent/app/agents/langchain.py
+++ b/libs/superagent/app/agents/langchain.py
@@ -46,7 +46,7 @@ def _get_llm(self):
         if llm_data.llm.provider == LLMProvider.OPENAI:
             return ChatOpenAI(
-                model=LLM_MAPPING[self.agent_data.llmModel],
+                model=LLM_MAPPING[self.llm_data.model],
                 openai_api_key=llm_data.llm.apiKey,
                 streaming=self.enable_streaming,
                 callbacks=self.callbacks,
diff --git a/libs/superagent/app/agents/llm.py b/libs/superagent/app/agents/llm.py
index 647a92760..d3dfb3e32 100644
--- a/libs/superagent/app/agents/llm.py
+++ b/libs/superagent/app/agents/llm.py
@@ -7,7 +7,7 @@
 from langchain_core.agents import AgentActionMessageLog
 from langchain_core.messages import AIMessage
 from langchain_core.utils.function_calling import convert_to_openai_function
-from litellm import acompletion, completion
+from litellm import completion

 from app.agents.base import AgentBase
 from app.tools import get_tools
@@ -25,9 +25,9 @@ async def call_tool(
     agent_data: Agent, session_id: str, function: Any
 ) -> tuple[AgentActionMessageLog, Any]:
-    name = function.name
+    name = function.get("name")
     try:
-        args = json.loads(function.arguments)
+        args = json.loads(function.get("arguments"))
     except Exception as e:
         logger.error(f"Error parsing function arguments for {name}: {e}")
         raise e
@@ -44,8 +44,12 @@ async def call_tool(
     if not tool_to_call:
         raise Exception(f"Function {name} not found in tools")

+    logging.info(f"Calling tool {name} with arguments {args}")
+
     res = await tool_to_call._arun(**args)

+    logging.info(f"Tool {name} returned {res}")
+
     return (
         AgentActionMessageLog(
             tool=name,
@@ -70,7 +74,14 @@ async def call_tool(
 class LLMAgent(AgentBase):
     @property
     def tools(self):
-        pass
+        tools = get_tools(
+            agent_data=self.agent_data,
+            session_id=self.session_id,
+        )
+        return [
+            {"type": "function", "function": convert_to_openai_function(tool)}
+            for tool in tools
+        ]

     @property
     def prompt(self):
@@ -90,9 +101,157 @@ def prompt(self):

         return prompt

-    @property
-    def messages(self):
-        return [
+    async def get_agent(self):
+        if self.llm_data.llm.provider in [
+            LLMProvider.ANTHROPIC,
+            LLMProvider.MISTRAL,
+            LLMProvider.GROQ,
+            LLMProvider.BEDROCK,
+        ]:
+            logger.info("Using native function calling")
+            return AgentExecutor(**self.__dict__)
+
+        return AgentExecutorOpenAIFunc(*self.__dict__)
+
+
+class AgentExecutor(LLMAgent):
+    """Agent Executor for LLM (with native function calling)"""
+
+    NOT_TOOLS_STREAMING_SUPPORTED_PROVIDERS = [
+        LLMProvider.GROQ,
+        LLMProvider.BEDROCK,
+    ]
+
+    intermediate_steps = []
+
+    async def _execute_tool_calls(self, tool_calls: list[dict], **kwargs):
+        messages: list = kwargs.get("messages")
+        for tool_call in tool_calls:
+            try:
+                intermediate_step = await call_tool(
+                    agent_data=self.agent_data,
+                    session_id=self.session_id,
+                    function=tool_call.get("function"),
+                )
+                self.intermediate_steps.append(intermediate_step)
+            except Exception as e:
+                logger.error(
+                    f"Error calling function {tool_call.get('function').get('name')}: {e}"
+                )
+                continue
+            (_, tool_res) = intermediate_step
+            new_message = {
+                "role": "tool",
+                "name": tool_call.get("function").get("name"),
+                "content": tool_res,
+            }
+            if tool_call.get("id"):
+                new_message["tool_call_id"] = tool_call.get("id")
+
+            messages.append(new_message)
+
+        self.messages = messages
+        kwargs["messages"] = self.messages
+        return await self._completion(**kwargs)
+
+    def _cleanup_output(self, output):
+        # anthropic returns a XML formatted response
+        # we need to get the content between tags
+        if self.llm_data.llm.provider == LLMProvider.ANTHROPIC:
+            from xmltodict import parse as xml_parse
+
+            xml_output = "<root>" + output + "</root>"
+            output = xml_parse(xml_output)
+            output = output["root"]
+            if "result" in output:
+                output = output.get("result")
+            else:
+                output = output.get("#text")
+        return output
+
+    def _transform_completion_to_streaming(self, res, **kwargs):
+        # hacky way to convert non-streaming response to streaming response
+        if not kwargs.get("stream"):
+            for choice in res.choices:
+                choice.delta = choice.message
+            res = [res]
+        return res
+
+    async def _stream_lines_by_lines(self, output: str):
+        output_by_lines = output.split("\n")
+        if len(output_by_lines) > 1:
+            for line in output_by_lines:
+                await self.streaming_callback.on_llm_new_token(line)
+                await self.streaming_callback.on_llm_new_token("\n")
+        else:
+            await self.streaming_callback.on_llm_new_token(output_by_lines[0])
+
+    async def _completion(self, **kwargs) -> Any:
+        logger.info(f"Calling LLM with kwargs: {kwargs}")
+        new_messages = self.messages
+
+        if kwargs.get("stream"):
+            await self.streaming_callback.on_llm_start()
+
+        should_stream_directly = (
+            self.enable_streaming
+            and self.llm_data.llm.provider
+            not in self.NOT_TOOLS_STREAMING_SUPPORTED_PROVIDERS
+            and self.llm_data.llm.provider != LLMProvider.ANTHROPIC
+        )
+
+        # TODO: Remove this when Groq and Bedrock supports streaming with tools
+        if self.llm_data.llm.provider in self.NOT_TOOLS_STREAMING_SUPPORTED_PROVIDERS:
+            logger.info(
+                f"Disabling streaming for {self.llm_data.llm.provider}, as tools are used"
+            )
+            kwargs["stream"] = False
+
+        res = completion(**kwargs)
+        res = self._transform_completion_to_streaming(res, **kwargs)
+
+        tool_calls = []
+        output = ""
+
+        for chunk in res:
+            new_message = chunk.choices[0].delta.dict()
+            # clean up tool calls
+            if new_message.get("tool_calls"):
+                new_message["role"] = "assistant"
+                new_tool_calls = new_message.get("tool_calls", [])
+                for tool_call in new_tool_calls:
+                    tool_call["type"] = "function"
+                    if "index" in tool_call:
+                        del tool_call["index"]
+
+                new_messages.append(new_message)
+                tool_calls.extend(new_tool_calls)
+
+            content = new_message.get("content", "")
+
+            if content:
+                output += content
+                if should_stream_directly:
+                    await self.streaming_callback.on_llm_new_token(content)
+
+        self.messages = new_messages
+
+        if tool_calls:
+            return await self._execute_tool_calls(tool_calls, **kwargs)
+
+        output = self._cleanup_output(output)
+
+        if not should_stream_directly:
+            await self._stream_lines_by_lines(output)
+
+        if self.enable_streaming:
+            self.streaming_callback.done.set()
+
+        return output
+
+    async def ainvoke(self, input, *_, **kwargs):
+        self.input = input
+        self.messages = [
             {
                 "content": self.prompt,
                 "role": "system",
@@ -103,22 +262,33 @@ def messages(self):
             },
         ]

-    async def get_agent(self):
-        agent_executor = LLMAgentOpenAIFunctionCallingExecutor(**self.__dict__)
-        return agent_executor
+        if self.enable_streaming:
+            for callback in kwargs["config"]["callbacks"]:
+                if isinstance(callback, CustomAsyncIteratorCallbackHandler):
+                    self.streaming_callback = callback

+            if not self.streaming_callback:
+                raise Exception("Streaming Callback not found")

-class LLMAgentOpenAIFunctionCallingExecutor(LLMAgent):
-    @property
-    def tools(self):
-        tools = get_tools(
-            agent_data=self.agent_data,
-            session_id=self.session_id,
+        output = await self._completion(
+            model=self.llm_data.model,
+            api_key=self.llm_data.llm.apiKey,
+            messages=self.messages,
+            tools=self.tools if len(self.tools) > 0 else None,
+            tool_choice="auto" if len(self.tools) > 0 else None,
+            stream=self.enable_streaming,
+            **self.llm_data.params.dict(exclude_unset=True),
         )
-        return [
-            {"type": "function", "function": convert_to_openai_function(tool)}
-            for tool in tools
-        ]
+
+        return {
+            "intermediate_steps": self.intermediate_steps,
+            "input": self.input,
+            "output": output,
+        }
+
+
+class AgentExecutorOpenAIFunc(LLMAgent):
+    """Agent Executor that binded with OpenAI Function Calling"""

     @property
     def messages_function_calling(self):
@@ -133,10 +303,22 @@ def messages_function_calling(self):
             },
         ]

+    @property
+    def messages(self):
+        return [
+            {
+                "content": self.prompt,
+                "role": "system",
+            },
+            {
+                "content": self.input,
+                "role": "user",
+            },
+        ]
+
     async def ainvoke(self, input, *_, **kwargs):
         self.input = input
-        model = self.agent_data.metadata.get("model", "gpt-3.5-turbo-0125")
-        tool_responses = []
+        tool_results = []

         if len(self.tools) > 0:
             openai_llm = await prisma.llm.find_first(
@@ -154,11 +336,11 @@ async def ainvoke(self, input, *_, **kwargs):
             )

             res = completion(
+                api_key=openai_api_key,
                 model="gpt-3.5-turbo-0125",
                 messages=self.messages_function_calling,
                 tools=self.tools,
                 stream=False,
-                api_key=openai_api_key,
             )

             tool_calls = res.choices[0].message.get("tool_calls", [])
@@ -167,28 +349,28 @@ async def ainvoke(self, input, *_, **kwargs):
                 try:
                     res = await call_tool(
                         agent_data=self.agent_data,
                         session_id=self.session_id,
-                        function=tool_call.function,
+                        function=tool_call.function.dict(),
                     )
                 except Exception as e:
                     logger.error(
                         f"Error calling function {tool_call.function.name}: {e}"
                     )
                     continue
-                tool_responses.append(res)
+                tool_results.append(res)

-        if len(tool_responses) > 0:
+        if len(tool_results) > 0:
             INPUT_TEMPLATE = "{input}\n Context: {context}\n"
             self.input = INPUT_TEMPLATE.format(
                 input=self.input,
                 context="\n\n".join(
-                    [tool_response for (_, tool_response) in tool_responses]
+                    [tool_response for (_, tool_response) in tool_results]
                 ),
             )

         params = self.llm_data.params.dict(exclude_unset=True)
-        res = await acompletion(
+        res = completion(
             api_key=self.llm_data.llm.apiKey,
-            model=model,
+            model=self.llm_data.model,
             messages=self.messages,
             stream=self.enable_streaming,
             **params,
@@ -205,7 +387,7 @@ async def ainvoke(self, input, *_, **kwargs):
             raise Exception("Streaming Callback not found")

         await streaming_callback.on_llm_start()
-        async for chunk in res:
+        for chunk in res:
             token = chunk.choices[0].delta.content
             if token:
                 output += token
@@ -216,7 +398,7 @@ async def ainvoke(self, input, *_, **kwargs):
             output = res.choices[0].message.content

         return {
-            "intermediate_steps": tool_responses,
+            "intermediate_steps": tool_results,
             "input": self.input,
             "output": output,
         }

From 430dddb833dd9b7adfc6f0dac99b2e1a2ca53c90 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Mon, 22 Apr 2024 22:16:52 +0400
Subject: [PATCH 25/30] refactor: update SAML configuration to use 'browser
 tool' instead of 'browser' in workflow prompts

---
 libs/superagent/app/api/workflow_configs/saml_schema.py | 2 +-
 libs/superagent/app/models/tools.py                     | 4 ++--
 libs/ui/config/saml.ts                                  | 4 ++--
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/libs/superagent/app/api/workflow_configs/saml_schema.py b/libs/superagent/app/api/workflow_configs/saml_schema.py
index 01253e758..decc22b97 100644
--- a/libs/superagent/app/api/workflow_configs/saml_schema.py
+++ b/libs/superagent/app/api/workflow_configs/saml_schema.py
@@ -164,7 +164,7 @@ class Workflow(BaseModel):
     bedrock: Optional[LLMAgent]
     groq: Optional[LLMAgent]
     mistral: Optional[LLMAgent]
-    cohere_chat: Optional[LLMAgent] = Field(alias="cohere")
+    cohere_chat: Optional[LLMAgent]
     anthropic: Optional[LLMAgent]
     llm: Optional[LLMAgent] = Field(
         description="Deprecated! Use LLM providers instead. e.g. `perplexity` or `together_ai`"
diff --git a/libs/superagent/app/models/tools.py b/libs/superagent/app/models/tools.py
index bc6d365f6..faa63ef62 100644
--- a/libs/superagent/app/models/tools.py
+++ b/libs/superagent/app/models/tools.py
@@ -1,6 +1,6 @@
 from typing import Optional

-from pydantic import BaseModel
+from pydantic import BaseModel, Field


 class AlgoliaInput(BaseModel):
@@ -61,7 +61,7 @@ class E2BCodeExecutorInput(BaseModel):


 class BrowserInput(BaseModel):
-    url: str
+    url: str = Field(..., description="A valid url including protocol to analyze")


 class GPTVisionInputModel(BaseModel):
diff --git a/libs/ui/config/saml.ts b/libs/ui/config/saml.ts
index 9e92d2045..156c3298c 100644
--- a/libs/ui/config/saml.ts
+++ b/libs/ui/config/saml.ts
@@ -7,7 +7,7 @@ workflows:
   - superagent:
       name: Browser assistant
       intro: |-
         👋 Hi there! How can I help search for answers on the internet.
-      prompt: Use the browser to answer any questions
+      prompt: Use the browser tool to answer any questions
       tools:
         - browser:
             name: browser
@@ -22,7 +22,7 @@
   - superagent:
       name: Browser assistant
       llm: gpt-3.5-turbo-16k-0613
-      prompt: Use the browser to answer all questions
+      prompt: Use the browser tool to answer all questions
       intro: 👋 Hi there! How can I help you?
       tools:
         - browser:

From a7c60b46a9fb46fa0d834706d5d83ef4ec5d7228 Mon Sep 17 00:00:00 2001
From: alisalim17
Date: Wed, 24 Apr 2024 12:24:44 +0400
Subject: [PATCH 26/30] deps: upgrade litellm from version 1.35.2 to 1.35.21

---
 libs/superagent/poetry.lock    | 8 ++++----
 libs/superagent/pyproject.toml | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/libs/superagent/poetry.lock b/libs/superagent/poetry.lock
index a5f2389a3..9857cdffe 100644
--- a/libs/superagent/poetry.lock
+++ b/libs/superagent/poetry.lock
@@ -2311,13 +2311,13 @@ requests = ">=2,<3"

 [[package]]
 name = "litellm"
-version = "1.35.2"
+version = "1.35.21"
 description = "Library to easily interface with LLM API providers"
 optional = false
 python-versions = "!=2.7.*,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,!=3.7.*,>=3.8"
 files = [
-    {file = "litellm-1.35.2-py3-none-any.whl", hash = "sha256:686ee040154d7062b0078d882fa6399c5c7cc5ec9b5266490dee68f1b8905a36"},
-    {file = "litellm-1.35.2.tar.gz", hash = "sha256:062e5be75196da7348ae0c4f60d396f0b23ee874708ed81c40f7675161213385"},
+    {file = "litellm-1.35.21-py3-none-any.whl", hash = "sha256:907230b7ff57c853e32d04274c2bb01f75e77d49220bd3d4d8fa02cfe6d3492a"},
+    {file = "litellm-1.35.21.tar.gz", hash = "sha256:be0f9452fa357996e194c88eebc94f742be2fa623afd137a91b1e60ce5c3821f"},
 ]

 [package.dependencies]
@@ -6025,4 +6025,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p
 [metadata]
 lock-version = "2.0"
 python-versions = "^3.8.1, <3.12"
-content-hash = "c390f22730e24482e7f42cc8140d339a1025fb1e25021c6d886cd4dafab3a622"
+content-hash = "9049d2eda40cf7a7809de8eeac32efac0753e9006d50b8ef98aca4ef75f0e703"
diff --git a/libs/superagent/pyproject.toml b/libs/superagent/pyproject.toml
index 4f468981c..7854e3041 100644
--- a/libs/superagent/pyproject.toml
+++ b/libs/superagent/pyproject.toml
@@ -50,7 +50,7 @@ openai = "^1.1.1"
 langchain-experimental = "^0.0.37"
 pydub = "^0.25.1"
 algoliasearch = "^3.0.0"
-litellm = "1.35.2"
+litellm = "1.35.21" weaviate-client = "^3.25.3" qdrant-client = "^1.6.9" vecs = "^0.4.2" From fcd791df6e5d0a70bdbefad50d00e19b8d662830 Mon Sep 17 00:00:00 2001 From: alisalim17 Date: Wed, 24 Apr 2024 16:34:18 +0400 Subject: [PATCH 27/30] fix: handle claude 3 haiku output --- libs/superagent/app/agents/llm.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/libs/superagent/app/agents/llm.py b/libs/superagent/app/agents/llm.py index d3dfb3e32..4adec9e2a 100644 --- a/libs/superagent/app/agents/llm.py +++ b/libs/superagent/app/agents/llm.py @@ -163,10 +163,14 @@ def _cleanup_output(self, output): xml_output = "" + output + "" output = xml_parse(xml_output) output = output["root"] - if "result" in output: - output = output.get("result") + + if isinstance(output, str): + return output else: - output = output.get("#text") + if "result" in output: + output = output.get("result") + else: + output = output.get("#text") return output def _transform_completion_to_streaming(self, res, **kwargs): From bf6cd15990c6d04ad63c4e7ea7082232c25af87d Mon Sep 17 00:00:00 2001 From: alisalim17 Date: Wed, 24 Apr 2024 16:53:49 +0400 Subject: [PATCH 28/30] fix: passing tool error to LLM instead of moving on --- libs/superagent/app/agents/llm.py | 80 ++++++++++++++----------------- 1 file changed, 37 insertions(+), 43 deletions(-) diff --git a/libs/superagent/app/agents/llm.py b/libs/superagent/app/agents/llm.py index 4adec9e2a..bfcc73ecb 100644 --- a/libs/superagent/app/agents/llm.py +++ b/libs/superagent/app/agents/llm.py @@ -46,29 +46,35 @@ async def call_tool( logging.info(f"Calling tool {name} with arguments {args}") - res = await tool_to_call._arun(**args) + action_log = AgentActionMessageLog( + tool=name, + tool_input=args, + log=f"\nInvoking: `{name}` with `{args}`\n\n\n", + message_log=[ + AIMessage( + content="", + additional_kwargs={ + "function_call": { + "arguments": args, + "name": name, + } + }, + ) + ], + ) + + try: + res = await tool_to_call._arun(**args) + except Exception as e: + logging.error(f"Error calling tool {name}: {e}") + return ( + action_log, + f"Error calling {tool_to_call.name} tool with arguments {args}: {e}", + ) logging.info(f"Tool {name} returned {res}") - return ( - AgentActionMessageLog( - tool=name, - tool_input=args, - log=f"\nInvoking: `{name}` with `{args}`\n\n\n", - message_log=[ - AIMessage( - content="", - additional_kwargs={ - "function_call": { - "arguments": args, - "name": name, - } - }, - ) - ], - ), - res, - ) + return (action_log, res) class LLMAgent(AgentBase): @@ -127,18 +133,12 @@ class AgentExecutor(LLMAgent): async def _execute_tool_calls(self, tool_calls: list[dict], **kwargs): messages: list = kwargs.get("messages") for tool_call in tool_calls: - try: - intermediate_step = await call_tool( - agent_data=self.agent_data, - session_id=self.session_id, - function=tool_call.get("function"), - ) - self.intermediate_steps.append(intermediate_step) - except Exception as e: - logger.error( - f"Error calling function {tool_call.get('function').get('name')}: {e}" - ) - continue + intermediate_step = await call_tool( + agent_data=self.agent_data, + session_id=self.session_id, + function=tool_call.get("function"), + ) + self.intermediate_steps.append(intermediate_step) (_, tool_res) = intermediate_step new_message = { "role": "tool", @@ -349,17 +349,11 @@ async def ainvoke(self, input, *_, **kwargs): tool_calls = res.choices[0].message.get("tool_calls", []) for tool_call in tool_calls: - try: - res = await call_tool( - 
agent_data=self.agent_data, - session_id=self.session_id, - function=tool_call.function.dict(), - ) - except Exception as e: - logger.error( - f"Error calling function {tool_call.function.name}: {e}" - ) - continue + res = await call_tool( + agent_data=self.agent_data, + session_id=self.session_id, + function=tool_call.function.dict(), + ) tool_results.append(res) if len(tool_results) > 0: From 3dec17980abfe1a55218c978f4366a54186a5632 Mon Sep 17 00:00:00 2001 From: alisalim17 Date: Mon, 29 Apr 2024 11:08:10 +0400 Subject: [PATCH 29/30] refactor: LLMAgent's get_agent method to use native function calling if tool calling is supported --- libs/superagent/app/agents/llm.py | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/libs/superagent/app/agents/llm.py b/libs/superagent/app/agents/llm.py index bfcc73ecb..d533b6002 100644 --- a/libs/superagent/app/agents/llm.py +++ b/libs/superagent/app/agents/llm.py @@ -7,7 +7,7 @@ from langchain_core.agents import AgentActionMessageLog from langchain_core.messages import AIMessage from langchain_core.utils.function_calling import convert_to_openai_function -from litellm import completion +from litellm import completion, get_llm_provider, get_supported_openai_params from app.agents.base import AgentBase from app.tools import get_tools @@ -107,17 +107,21 @@ def prompt(self): return prompt + @property + def _is_tool_calling_supported(self): + (model, custom_llm_provider, _, _) = get_llm_provider(self.llm_data.model) + supported_params = get_supported_openai_params( + model=model, custom_llm_provider=custom_llm_provider + ) + + return "tools" in supported_params + async def get_agent(self): - if self.llm_data.llm.provider in [ - LLMProvider.ANTHROPIC, - LLMProvider.MISTRAL, - LLMProvider.GROQ, - LLMProvider.BEDROCK, - ]: + if self._is_tool_calling_supported: logger.info("Using native function calling") return AgentExecutor(**self.__dict__) - return AgentExecutorOpenAIFunc(*self.__dict__) + return AgentExecutorOpenAIFunc(**self.__dict__) class AgentExecutor(LLMAgent): From a0f4560d0233b3fb624970813d1f8dff506a8d80 Mon Sep 17 00:00:00 2001 From: alisalim17 Date: Mon, 29 Apr 2024 11:53:30 +0400 Subject: [PATCH 30/30] feat: add return_direct support in LLMAgent --- libs/superagent/app/agents/llm.py | 64 ++++++++++++++++++++----------- 1 file changed, 41 insertions(+), 23 deletions(-) diff --git a/libs/superagent/app/agents/llm.py b/libs/superagent/app/agents/llm.py index d533b6002..9836e1fec 100644 --- a/libs/superagent/app/agents/llm.py +++ b/libs/superagent/app/agents/llm.py @@ -62,19 +62,15 @@ async def call_tool( ) ], ) - + tool_res = None try: - res = await tool_to_call._arun(**args) + tool_res = await tool_to_call._arun(**args) + logging.info(f"Tool {name} returned {tool_res}") except Exception as e: + tool_res = f"Error calling {tool_to_call.name} tool with arguments {args}: {e}" logging.error(f"Error calling tool {name}: {e}") - return ( - action_log, - f"Error calling {tool_to_call.name} tool with arguments {args}: {e}", - ) - logging.info(f"Tool {name} returned {res}") - - return (action_log, res) + return (action_log, tool_res, tool_to_call.return_direct) class LLMAgent(AgentBase): @@ -116,6 +112,15 @@ def _is_tool_calling_supported(self): return "tools" in supported_params + async def _stream_by_lines(self, output: str): + output_by_lines = output.split("\n") + if len(output_by_lines) > 1: + for line in output_by_lines: + await self.streaming_callback.on_llm_new_token(line) + await 
+                await self.streaming_callback.on_llm_new_token("\n")
+        else:
+            await self.streaming_callback.on_llm_new_token(output_by_lines[0])
+
     async def get_agent(self):
         if self._is_tool_calling_supported:
             logger.info("Using native function calling")
@@ -142,23 +151,27 @@ class AgentExecutor(LLMAgent):
     async def _execute_tool_calls(self, tool_calls: list[dict], **kwargs):
         messages: list = kwargs.get("messages")
         for tool_call in tool_calls:
             intermediate_step = await call_tool(
                 agent_data=self.agent_data,
                 session_id=self.session_id,
                 function=tool_call.get("function"),
             )
-            self.intermediate_steps.append(intermediate_step)
-            (_, tool_res) = intermediate_step
+            (action_log, tool_res, return_direct) = intermediate_step
+            self.intermediate_steps.append((action_log, tool_res))
             new_message = {
                 "role": "tool",
                 "name": tool_call.get("function").get("name"),
                 "content": tool_res,
             }
             if tool_call.get("id"):
                 new_message["tool_call_id"] = tool_call.get("id")

             messages.append(new_message)
+            if return_direct:
+                if self.enable_streaming:
+                    await self._stream_by_lines(tool_res)
+                    self.streaming_callback.done.set()
+                return tool_res

         self.messages = messages
         kwargs["messages"] = self.messages
         return await self._completion(**kwargs)
@@ -185,15 +198,6 @@ def _transform_completion_to_streaming(self, res, **kwargs):
             res = [res]
         return res

-    async def _stream_lines_by_lines(self, output: str):
-        output_by_lines = output.split("\n")
-        if len(output_by_lines) > 1:
-            for line in output_by_lines:
-                await self.streaming_callback.on_llm_new_token(line)
-                await self.streaming_callback.on_llm_new_token("\n")
-        else:
-            await self.streaming_callback.on_llm_new_token(output_by_lines[0])
-
     async def _completion(self, **kwargs) -> Any:
         logger.info(f"Calling LLM with kwargs: {kwargs}")
         new_messages = self.messages
@@ -250,7 +254,7 @@ async def _completion(self, **kwargs) -> Any:
         output = self._cleanup_output(output)

         if not should_stream_directly:
-            await self._stream_lines_by_lines(output)
+            await self._stream_by_lines(output)

         if self.enable_streaming:
             self.streaming_callback.done.set()
@@ -328,6 +332,13 @@ def messages(self):
     async def ainvoke(self, input, *_, **kwargs):
         self.input = input
         tool_results = []
+        if self.enable_streaming:
+            for callback in kwargs["config"]["callbacks"]:
+                if isinstance(callback, CustomAsyncIteratorCallbackHandler):
+                    self.streaming_callback = callback
+
+            if not self.streaming_callback:
+                raise Exception("Streaming Callback not found")

         if len(self.tools) > 0:
             openai_llm = await prisma.llm.find_first(
@@ -361,12 +372,22 @@ async def ainvoke(self, input, *_, **kwargs):
             tool_calls = res.choices[0].message.get("tool_calls", [])
             for tool_call in tool_calls:
-                res = await call_tool(
+                (action_log, tool_res, return_direct) = await call_tool(
                     agent_data=self.agent_data,
                     session_id=self.session_id,
                     function=tool_call.function.dict(),
                 )
-                tool_results.append(res)
+                tool_results.append((action_log, tool_res))
+                if return_direct:
+                    if self.enable_streaming:
+                        await self._stream_by_lines(tool_res)
+                        self.streaming_callback.done.set()
+
+                    return {
+                        "intermediate_steps": tool_results,
+                        "input": self.input,
+                        "output": tool_res,
+                    }

         if len(tool_results) > 0:
             INPUT_TEMPLATE = "{input}\n Context: {context}\n"