diff --git a/.azure/pipelines/build.yaml b/.azure/pipelines/build.yaml new file mode 100644 index 000000000000..7ce3f0c1efa9 --- /dev/null +++ b/.azure/pipelines/build.yaml @@ -0,0 +1,94 @@ +trigger: + branches: + include: + - main + paths: + include: + - dotnet + +schedules: +- cron: "0 0 * * *" + displayName: 'Daily midnight build (including CodeQL)' + branches: + include: + - main + always: true + +parameters: + - name: build_configuration + displayName: Build configuration + type: string + default: Release + values: + - Release + - Debug + - name: include_suffix + displayName: Append version suffix + type: boolean + default: true + - name: version_suffix + displayName: Version suffix + type: string + default: dev.$(Build.BuildNumber) + - name: codesign + displayName: Enable code signing + type: boolean + default: false + - name: skip_test + displayName: Skip tests + type: boolean + default: false + - name: publish_nuget + displayName: Publish to nuget.org + type: boolean + default: false + - name: publish_nightly + displayName: Publish to autogen-nightly + type: boolean + default: true + - name: publish_artifacts + displayName: Publish artifacts + type: boolean + default: false + - name: runCodeQL3000 + default: false + displayName: Run CodeQL3000 tasks + type: boolean + +variables: +- template: templates/vars.yaml + +resources: + repositories: + - repository: 1ESPipelineTemplates + type: git + name: 1ESPipelineTemplates/1ESPipelineTemplates + ref: refs/tags/release + +extends: + ${{ if eq(variables['System.TeamProject'], 'GitHub - PR Builds') }}: + template: v1/1ES.Unofficial.PipelineTemplate.yml@1ESPipelineTemplates + ${{ else }}: + template: v1/1ES.Official.PipelineTemplate.yml@1ESPipelineTemplates + parameters: + settings: + skipBuildTagsForGitHubPullRequests: true + pool: + name: $(pool_name) + image: $(pool_image) + os: windows + stages: + - stage: build_test + displayName: Build and Tests + jobs: + - template: /.azure/pipelines/templates/build.yaml@self + parameters: + build_configuration: ${{ parameters.build_configuration }} + include_suffix: ${{ parameters.include_suffix }} + version_suffix: ${{ parameters.version_suffix }} + codesign: ${{ parameters.codesign }} + skip_test: ${{ parameters.skip_test }} + publish_nightly: ${{ parameters.publish_nightly }} + publish_nuget: ${{ parameters.publish_nuget }} + runCodeQL3000: ${{ parameters.runCodeQL3000 }} + publish_artifacts: ${{ parameters.publish_artifacts }} \ No newline at end of file diff --git a/.azure/pipelines/templates/build.yaml b/.azure/pipelines/templates/build.yaml new file mode 100644 index 000000000000..0b7dbe990c38 --- /dev/null +++ b/.azure/pipelines/templates/build.yaml @@ -0,0 +1,228 @@ +parameters: + - name: build_configuration + displayName: Build configuration + type: string + default: Release + values: + - Release + - Debug + - name: include_suffix + displayName: Append version suffix + type: boolean + default: true + - name: version_suffix + displayName: Version suffix + type: string + default: ci.$(Build.BuildNumber) + - name: codesign + displayName: Enable code signing + type: boolean + default: false + - name: skip_test + displayName: Skip tests + type: boolean + default: false + - name: publish_nightly + displayName: Publish to autogen-nightly + type: boolean + default: false + - name: publish_nuget + displayName: Publish to nuget.org + type: boolean + default: false + - name: publish_artifacts + displayName: Publish artifacts + type: boolean + default: false + - name: runCodeQL3000 + default: false + 
displayName: Run CodeQL3000 tasks + type: boolean + +jobs: + +# Build, sign dlls, build nuget pkgs, then sign them +- job: Build + displayName: Build and create NuGet packages + variables: + publishVstsFeed: 'AGPublic/AutoGen-Nightly' + ${{ if eq(parameters.codesign, true) }}: + esrp_signing: true + ${{ else }}: + esrp_signing: false + ${{ if ne(variables['System.TeamProject'], 'GitHub - PR Builds') }}: + templateContext: + outputs: + # Publish artifacts if enabled + - ${{ if eq(parameters.publish_artifacts, true) }}: # TODO add eq(parameters.codesign, true) + - output: pipelineArtifact + targetPath: '$(build.sourcesdirectory)/dotnet/artifacts' + artifactName: artifacts folder + # Publish packages to nightly + - ${{ if eq(parameters.publish_nightly, true) }}: # TODO add eq(parameters.codesign, true) + - output: nuget + useDotNetTask: false + packageParentPath: $(Pipeline.Workspace) + packagesToPush: $(build.sourcesdirectory)/dotnet/artifacts/**/*.nupkg;$(build.sourcesdirectory)/dotnet/artifacts/**/*.snupkg + nuGetFeedType: internal + publishVstsFeed: $(publishVstsFeed) + allowPackageConflicts: true + - ${{ if and(eq(parameters.codesign, true), eq(parameters.publish_nuget, true)) }}: + - output: nuget + condition: succeeded() + useDotNetTask: false + packageParentPath: $(Pipeline.Workspace) + packagesToPush: $(build.sourcesdirectory)/dotnet/artifacts/**/*.nupkg;$(build.sourcesdirectory)/dotnet/artifacts/**/*.snupkg + nuGetFeedType: external + publishFeedCredentials: dotnet-orleans-nuget + publishPackageMetadata: true + allowPackageConflicts: true + steps: + - checkout: self + - task: UseDotNet@2 + displayName: 'Use .NET Core sdk' + inputs: + useGlobalJson: true + workingDirectory: $(Build.SourcesDirectory)/dotnet + - task: Bash@3 + displayName: Install .NET Aspire workload + inputs: + targetType: 'inline' + script: | + dotnet nuget locals all --clear + dotnet workload install aspire + - ${{ if eq(variables.runCodeQL3000, 'true') }}: + - task: CodeQL3000Init@0 + displayName: CodeQL Initialize + # This task only tags a build if it actually does CodeQL3000 work. + # Those tasks no-op while the analysis is considered up to date i.e. for runs w/in a few days of each other. 
+ - script: "echo ##vso[build.addbuildtag]CodeQL3000" + displayName: 'Set CI CodeQL3000 tag' + condition: ne(variables.CODEQL_DIST,'') + - task: DotNetCoreCLI@2 + displayName: Build + inputs: + command: build + arguments: '$(build_flags) /bl:${{parameters.build_configuration}}-Build.binlog /p:Configuration=${{parameters.build_configuration}} $(solution)' + workingDirectory: $(Build.SourcesDirectory)/dotnet + env: + ${{ if and(eq(parameters.include_suffix, true), eq(parameters.publish_nuget, false)) }}: + VersionSuffix: ${{parameters.version_suffix}} + OfficialBuild: $(official_build) + + - ${{ if eq(variables.runCodeQL3000, 'true') }}: + - task: CodeQL3000Finalize@0 + displayName: CodeQL Finalize + # DLL code signing + - ${{ if eq(variables.esrp_signing, true) }}: + - task: UseDotNet@2 + displayName: 'Codesign: Use .NET Core' + inputs: + packageType: runtime + version: $(codesign_runtime) + - task: CopyFiles@2 + displayName: 'Codesign: Copy Files for signing' + inputs: + SourceFolder: '$(build.sourcesdirectory)' + Contents: | + src/**/bin/${{parameters.build_configuration}}/**/AutoGen*.dll + src/**/bin/${{parameters.build_configuration}}/**/Microsoft.AutoGen.*.dll + TargetFolder: '$(build.artifactstagingdirectory)\codesign' + CleanTargetFolder: true + - task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@1 + displayName: 'Codesign: ESRP CodeSigning' + inputs: + ConnectedServiceName: 'CodeSign Service (NuGet)' + FolderPath: '$(build.artifactstagingdirectory)\codesign' + Pattern: '*' + signConfigType: inlineSignParams + inlineOperation: | + [ + { + "keyCode": "CP-230012", + "operationSetCode": "SigntoolSign", + "parameters": [ + { + "parameterName": "OpusName", + "parameterValue": "Microsoft" + }, + { + "parameterName": "OpusInfo", + "parameterValue": "http://www.microsoft.com" + }, + { + "parameterName": "FileDigest", + "parameterValue": "/fd \"SHA256\"" + }, + { + "parameterName": "PageHash", + "parameterValue": "/NPH" + }, + { + "parameterName": "TimeStamp", + "parameterValue": "/tr \"http://rfc3161.gtm.corp.microsoft.com/TSS/HttpTspServer\" /td sha256" + } + ], + "toolName": "sign", + "toolVersion": "1.0" + }, + { + "keyCode": "CP-230012", + "operationSetCode": "SigntoolVerify", + "parameters": [ ], + "toolName": "sign", + "toolVersion": "1.0" + } + ] + SessionTimeout: 180 + VerboseLogin: true + - task: CopyFiles@2 + displayName: 'Codesign: Copy Signed Files Back' + inputs: + SourceFolder: '$(build.artifactstagingdirectory)\codesign' + Contents: '**\*' + TargetFolder: '$(build.sourcesdirectory)' + OverWrite: true + # End DLL code signing + - task: CmdLine@2 + displayName: Pack + inputs: + script: 'dotnet pack --no-build --no-restore $(build_flags) /bl:${{parameters.build_configuration}}-Pack.binlog /p:Configuration=${{parameters.build_configuration}} $(solution)' + workingDirectory: $(Build.SourcesDirectory)/dotnet + env: + ${{ if and(eq(parameters.include_suffix, true), eq(parameters.publish_nuget, false)) }}: + VersionSuffix: ${{parameters.version_suffix}} + OfficialBuild: $(official_build) + # NuGet code signing + - ${{ if eq(variables.esrp_signing, true) }}: + - task: UseDotNet@2 + displayName: 'Codesign: Use .NET Core' + inputs: + packageType: runtime + version: $(codesign_runtime) + - task: SFP.build-tasks.custom-build-task-1.EsrpCodeSigning@1 + displayName: 'Codesign: ESRP CodeSigning (nuget)' + inputs: + ConnectedServiceName: 'CodeSign Service (NuGet)' + FolderPath: '$(build.sourcesdirectory)/Artifacts/${{parameters.build_configuration}}' + Pattern: '*.nupkg' + 
signConfigType: inlineSignParams + inlineOperation: | + [ + { + "keyCode": "CP-401405", + "operationSetCode": "NuGetSign", + "parameters": [], + "toolName": "sign", + "toolVersion": "1.0" + }, + { + "keyCode": "CP-401405", + "operationSetCode": "NuGetVerify", + "parameters": [ ], + "toolName": "sign", + "toolVersion": "1.0" + } + ] + SessionTimeout: 180 + VerboseLogin: true diff --git a/.azure/pipelines/templates/vars.yaml b/.azure/pipelines/templates/vars.yaml new file mode 100644 index 000000000000..0b735a02500f --- /dev/null +++ b/.azure/pipelines/templates/vars.yaml @@ -0,0 +1,34 @@ +# It seems that variables must be defined in their own file when using templates + +variables: + build_flags: ' /m /v:m' + solution: 'AutoGen.sln' + codesign_runtime: '2.1.x' + GDN_SUPPRESS_FORKED_BUILD_WARNING: true # Avoid warning "Guardian is not supported for builds from forked GitHub repositories" + MicroBuildOutputFolderOverride: '$(Agent.TempDirectory)' + # Auto-injection is not necessary because the tasks are explicitly included where they're enabled. + Codeql.SkipTaskAutoInjection: true + ${{ if eq(variables['System.TeamProject'], 'GitHub - PR Builds') }}: + pool_name: '1es-agpublish-pool' + pool_image: 'agpublish-agent-image' + official_build: false + ${{ else }}: + ${{ if eq(variables['System.TeamProject'], 'internal') }}: + pool_name: '1es-agpublish-pool' + pool_image: 'agpublish-agent-image' + ${{ else }}: + pool_name: '1es-agpublish-pool' + pool_image: 'agpublish-agent-image' + official_build: true + # Do not let CodeQL3000 Extension gate scan frequency. + Codeql.Cadence: 0 + # Enable CodeQL3000 unconditionally so it may be run on any branch. + Codeql.Enabled: true + # Ignore test and infrastructure code. + Codeql.SourceRoot: src + # CodeQL3000 needs this plumbed along as a variable to enable TSA. Don't use TSA in manual builds. + Codeql.TSAEnabled: ${{ eq(variables['Build.Reason'], 'Schedule') }} + # Default expects tsaoptions.json under SourceRoot. + Codeql.TSAOptionsPath: '$(Build.SourcesDirectory)/.config/tsaoptions.json' + # Do not slow builds down w/ the CodeQL3000 tasks unless this is a nightly build or it's requested. 
+ runCodeQL3000: ${{ or(eq(variables['Build.Reason'], 'Schedule'), and(eq(variables['Build.Reason'], 'Manual'), eq(parameters.runCodeQL3000, 'true'))) }} \ No newline at end of file diff --git a/.github/workflows/checks.yml b/.github/workflows/checks.yml index b6d8b112c22f..703c07494251 100644 --- a/.github/workflows/checks.yml +++ b/.github/workflows/checks.yml @@ -153,6 +153,27 @@ jobs: poe --directory ${{ matrix.package }} docs-check working-directory: ./python + docs-example-check: + runs-on: ubuntu-latest + strategy: + matrix: + package: ["./packages/autogen-core"] + steps: + - uses: actions/checkout@v4 + - uses: astral-sh/setup-uv@v3 + with: + enable-cache: true + - uses: actions/setup-python@v5 + with: + python-version: "3.11" + - run: uv sync --locked --all-extras + working-directory: ./python + - name: Run task + run: | + source ${{ github.workspace }}/python/.venv/bin/activate + poe --directory ${{ matrix.package }} docs-check-examples + working-directory: ./python + check-proto-changes-python: runs-on: ubuntu-latest steps: @@ -169,6 +190,7 @@ run: | source ${{ github.workspace }}/python/.venv/bin/activate poe gen-proto + poe gen-test-proto working-directory: ./python - name: Check if there are uncommitted changes id: changes diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0218a370535a..57ceb261fdec 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -39,6 +39,7 @@ jobs: { ref: "v0.4.0.dev4", dest-dir: "0.4.0.dev4" }, { ref: "v0.4.0.dev5", dest-dir: "0.4.0.dev5" }, { ref: "v0.4.0.dev6", dest-dir: "0.4.0.dev6" }, + { ref: "v0.4.0.dev7", dest-dir: "0.4.0.dev7" }, ] steps: - name: Checkout diff --git a/.gitignore b/.gitignore index 6a3db84248c3..448fc1e13b1f 100644 --- a/.gitignore +++ b/.gitignore @@ -194,4 +194,7 @@ samples/apps/autogen-studio/autogenstudio/models/test/ notebook/coding +# dotnet artifacts +artifacts + certs/** \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 411a8da3f3c6..fadb8091a0d6 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -24,10 +24,62 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. -## Roadmaps +## Running CI checks locally -To see what we are working on and what we plan to work on, please check our -[Roadmap Issues](https://aka.ms/autogen-roadmap). +It is important to use `uv` when running CI checks locally as it ensures that the correct dependencies and versions are used. + +Please follow the instructions [here](./python/README.md#setup) to get set up. + +For common tasks that are helpful during development and run in CI, see [here](./python/README.md#common-tasks). + +## Roadmap + +We use GitHub issues and milestones to track our roadmap. You can view the upcoming milestones [here](https://aka.ms/autogen-roadmap). + +## Versioning + +The `autogen-*` packages are generally all versioned together. When a change is made to one package, all packages are updated to the same version. This is to ensure that all packages are in sync with each other. + +We will update version numbers according to the following rules: + +- Increase minor version (0.X.0) upon breaking changes +- Increase patch version (0.0.X) upon new features or bug fixes + +## Release process + +1. 
Create a PR that updates the version numbers across the codebase ([example](https://github.com/microsoft/autogen/pull/4359)) + - The docs CI will fail for the PR, but this is expected and will be resolved in the next step +2. After merging the PR, create and push a tag that corresponds to the new version. For example, for `0.4.0.dev7`: + - `git tag 0.4.0.dev7 && git push origin 0.4.0.dev7` +3. Restart the docs CI by finding the failed [job corresponding to the `push` event](https://github.com/microsoft/autogen/actions/workflows/docs.yml) and restarting all jobs +4. Run [this](https://github.com/microsoft/autogen/actions/workflows/single-python-package.yml) workflow for each of the packages that need to be released and get an approval for the release for it to run + +## Triage process + +To help ensure the health of the project and community, the AutoGen committers have a weekly triage process to ensure that all issues and pull requests are reviewed and addressed in a timely manner. The following documents the responsibilities while on triage duty: + +- Issues + - Review all new issues - these will be tagged with [`needs-triage`](https://github.com/microsoft/autogen/issues?q=is%3Aissue%20state%3Aopen%20label%3Aneeds-triage). + - Apply appropriate labels: + - One of the `proj-*` labels based on the project the issue is related to + - `documentation`: related to documentation + - `x-lang`: related to cross-language functionality + - `dotnet`: related to .NET + - Add the issue to a relevant milestone if necessary + - If you can resolve the issue or reply to the OP, please do. + - If you cannot resolve the issue, assign it to the appropriate person. + - If awaiting a reply, add the tag `awaiting-op-response` (this will be auto-removed when the OP replies). + - Bonus: there is a backlog of old issues that need to be reviewed - if you have time, review these as well and close or refresh as many as you can. +- PRs + - The GitHub UI flags all recently updated PRs. Draft PRs can be ignored; otherwise, review all recently updated PRs. + - If a PR is ready for review and you can provide one, please go ahead. If you can't, please assign someone. You can quickly spin up a codespace with the PR to test it out. + - If a PR needs a reply from the OP, please tag it `awaiting-op-response`. + - If a PR is approved and passes CI, it's ready to merge; please do so. + - If it looks like there is a possibly transient CI failure, re-run failed jobs. +- Discussions + - Look for recently updated discussions and reply as needed or find someone on the team to reply. +- Security + - Look through any security alerts and file issues or dismiss as needed. ## Becoming a Reviewer diff --git a/README.md b/README.md index 71509427170e..2cb9b266d835 100644 --- a/README.md +++ b/README.md @@ -3,16 +3,19 @@
AutoGen Logo -[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) +[![Twitter](https://img.shields.io/twitter/url/https/twitter.com/cloudposse.svg?style=social&label=Follow%20%40pyautogen)](https://twitter.com/pyautogen) [![GitHub Discussions](https://img.shields.io/badge/Discussions-Q%26A-green?logo=github)](https://github.com/microsoft/autogen/discussions) [![0.2 Docs](https://img.shields.io/badge/Docs-0.2-blue)](https://microsoft.github.io/autogen/0.2/) [![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) +[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev7/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev7/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev7/) +
# AutoGen > [!IMPORTANT] -> +> - (11/14/24) ⚠️ In response to a number of asks to clarify and distinguish between official AutoGen and its forks that created confusion, we issued a [clarification statement](https://github.com/microsoft/autogen/discussions/4217). > - (10/13/24) Interested in the standard AutoGen as a prior user? Find it at the actively-maintained *AutoGen* [0.2 branch](https://github.com/microsoft/autogen/tree/0.2) and `autogen-agentchat~=0.2` PyPi package. > - (10/02/24) [AutoGen 0.4](https://microsoft.github.io/autogen/dev) is a from-the-ground-up rewrite of AutoGen. Learn more about the history, goals and future at [this blog post](https://microsoft.github.io/autogen/blog). We’re excited to work with the community to gather feedback, refine, and improve the project before we officially release 0.4. This is a big change, so AutoGen 0.2 is still available, maintained, and developed in the [0.2 branch](https://github.com/microsoft/autogen/tree/0.2). +> - *[Join us for Community Office Hours](https://github.com/microsoft/autogen/discussions/4059)* We will host a weekly open discussion to answer questions, talk about Roadmap, etc. AutoGen is an open-source framework for building AI agent systems. It simplifies the creation of event-driven, distributed, scalable, and resilient agentic applications. @@ -101,7 +104,7 @@ We look forward to your contributions! First install the packages: ```bash -pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' 'autogen-ext[openai]==0.4.0.dev7' ``` The following code uses OpenAI's GPT-4o model and you need to provide your diff --git a/docs/design/01 - Programming Model.md b/docs/design/01 - Programming Model.md index 732a46c6aebe..6bfa9f9766ad 100644 --- a/docs/design/01 - Programming Model.md +++ b/docs/design/01 - Programming Model.md @@ -6,23 +6,27 @@ The programming model is basically publish-subscribe. Agents subscribe to events ## Events Delivered as CloudEvents -Each event in the system is defined using the [CloudEvents Specification](https://cloudevents.io/). This allows for a common event format that can be used across different systems and languages. In CloudEvents, each event has a Context Attributes that must unique *id* (eg a UUID) a *source* (a unique urn or path), a *type* (the namespace of the event - prefixed with a reverse-DNS name. The prefixed domain dictates the organization which defines the semantics of this event type: e.g *com.github.pull_request.opened* or -*com.example.object.deleted.v2*), and optionally fields describing the data schema/content-type or extensions. +Each event in the system is defined using the [CloudEvents Specification](https://cloudevents.io/). This allows for a common event format that can be used across different systems and languages. In CloudEvents, each event has "Context Attributes" that must include: + +1. *id* - A unique id (eg. a UUID). +2. *source* - A URI or URN indicating the event's origin. +3. *type* - The namespace of the event - prefixed with a reverse-DNS name. + - The prefixed domain dictates the organization which defines the semantics of this event type: e.g `com.github.pull_request.opened` or `com.example.object.deleted.v2`), and optionally fields describing the data schema/content-type or extensions. ## Event Handlers -Each agent has a set of event handlers, that are bound to a specific match against a CloudEvents *type*. 
Event Handlers could match against an exact type or match for a pattern of events of a particular level in the type heirarchy (eg: *com.Microsoft.AutoGen.Agents.System.\** for all Events in the *System* namespace) Each event handler is a function that can change state, call models, access memory, call external tools, emit other events, and flow data to/from other systems. Each event handler can be a simple function or a more complex function that uses a state machine or other control logic. +Each agent has a set of event handlers that are bound to a specific match against a CloudEvents *type*. Event handlers can match against an exact type, or against a pattern of events at a particular level in the type hierarchy (e.g. `com.Microsoft.AutoGen.Agents.System.*` for all events in the `System` namespace). Each event handler is a function that can change state, call models, access memory, call external tools, emit other events, and flow data to/from other systems. Each event handler can be a simple function or a more complex function that uses a state machine or other control logic. ## Orchestrating Agents -If is possible to build a functional and scalable agent system that only reacts to external events. In many cases, however, you will want to orchestrate the agents to achieve a specific goal or follow a pre-determined workflow. In this case, you will need to build an orchestrator agent that manages the flow of events between agents. +It is possible to build a functional and scalable agent system that only reacts to external events. In many cases, however, you will want to orchestrate the agents to achieve a specific goal or follow a pre-determined workflow. In this case, you will need to build an orchestrator agent that manages the flow of events between agents. ## Built-in Event Types The AutoGen system comes with a set of built-in event types that are used to manage the system. These include: -* System Events - Events that are used to manage the system itself. These include events for starting and stopping the Agents, sending messages to all agents, and other system-level events. -* ? insert other types here ? +- *System Events* - Events that are used to manage the system itself. These include events for starting and stopping the Agents, sending messages to all agents, and other system-level events. +- *Insert other types here* ## Agent Contracts diff --git a/docs/design/02 - Topics.md b/docs/design/02 - Topics.md index aca577e748fa..7d7149c37c91 100644 --- a/docs/design/02 - Topics.md +++ b/docs/design/02 - Topics.md @@ -17,16 +17,16 @@ This document does not specify RPC/direct messaging A topic is identified by two components (called a `TopicId`): - [`type`](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#type) - represents the type of event that occurs, this is static and defined in code - - SHOULD use reverse domain name notation to avoid naming conflicts. For example: `com.example.my-topic`. + - SHOULD use reverse domain name notation to avoid naming conflicts. For example: `com.example.my-topic`. 
- [`source`](https://github.com/cloudevents/spec/blob/v1.0.2/cloudevents/spec.md#source-1) - represents where the event originated from, this is dynamic and based on the message itself - - SHOULD be a URI + - SHOULD be a URI Agent instances are identified by two components (called an `AgentId`): - `type` - represents the type of agent, this is static and defined in code - - MUST be a valid identifier as defined [here](https://docs.python.org/3/reference/lexical_analysis.html#identifiers) except that only the ASCII range is allowed + - MUST be a valid identifier as defined [here](https://docs.python.org/3/reference/lexical_analysis.html#identifiers) except that only the ASCII range is allowed - `key` - represents the instance of the agent type for the key - - SHOULD be a URI + - SHOULD be a URI For example: `GraphicDesigner:1234` diff --git a/docs/design/03 - Agent Worker Protocol.md b/docs/design/03 - Agent Worker Protocol.md index 49d9e867191b..81a9b9b7e97a 100644 --- a/docs/design/03 - Agent Worker Protocol.md +++ b/docs/design/03 - Agent Worker Protocol.md @@ -22,7 +22,7 @@ Agents are never explicitly created or destroyed. When a request is received for ## Worker protocol flow -The worker protocol has three phases, following the lifetime of the worker: initiation, operation, and termination. +The worker protocol has three phases, following the lifetime of the worker: initialization, operation, and termination. ### Initialization diff --git a/docs/design/04 - Agent and Topic ID Specs.md b/docs/design/04 - Agent and Topic ID Specs.md index 22a8a08894fb..b0e0e0e94e60 100644 --- a/docs/design/04 - Agent and Topic ID Specs.md +++ b/docs/design/04 - Agent and Topic ID Specs.md @@ -8,23 +8,23 @@ This document describes the structure, constraints, and behavior of Agent IDs an #### type -* Type: `string` -* Description: The agent type is not an agent class. It associates an agent with a specific factory function, which produces instances of agents of the same agent `type`. For example, different factory functions can produce the same agent class but with different constructor perameters. -* Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (_). A valid identifier cannot start with a number, or contain any spaces. -* Examples: - * `code_reviewer` - * `WebSurfer` - * `UserProxy` +- Type: `string` +- Description: The agent type is not an agent class. It associates an agent with a specific factory function, which produces instances of agents of the same agent `type`. For example, different factory functions can produce the same agent class but with different constructor parameters. +- Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (\_). A valid identifier cannot start with a number, or contain any spaces. +- Examples: + - `code_reviewer` + - `WebSurfer` + - `UserProxy` #### key -* Type: `string` -* Description: The agent key is an instance identifier for the given agent `type` -* Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). 
+- Examples: + - `default` + - A memory address + - a UUID string ## Topic ID @@ -32,16 +32,16 @@ This document describes the structure, constraints, and behavior of Agent IDs an #### type -* Type: `string` -* Description: topic type is usually defined by application code to mark the type of messages the topic is for. -* Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), or underscores (_). A valid identifier cannot start with a number, or contain any spaces. -* Examples: - * `GitHub_Issues` +- Type: `string` +- Description: Topic type is usually defined by application code to mark the type of messages the topic is for. +- Constraints: UTF8 and only contain alphanumeric letters (a-z) and (0-9), ':', '=', or underscores (\_). A valid identifier cannot start with a number, or contain any spaces. +- Examples: + - `GitHub_Issues` #### source -* Type: `string` -* Description: Topic source is the unique identifier for a topic within a topic type. It is typically defined by application data. -* Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). -* Examples: - * `github.com/{repo_name}/issues/{issue_number}` \ No newline at end of file +- Type: `string` +- Description: Topic source is the unique identifier for a topic within a topic type. It is typically defined by application data. +- Constraints: UTF8 and only contain characters between (inclusive) ascii 32 (space) and 126 (~). +- Examples: + - `github.com/{repo_name}/issues/{issue_number}` diff --git a/docs/design/readme.md b/docs/design/readme.md new file mode 100644 index 000000000000..6a8221027f75 --- /dev/null +++ b/docs/design/readme.md @@ -0,0 +1,3 @@ +# Docs + +You can find the project documentation [here](https://microsoft.github.io/autogen/dev/). diff --git a/docs/switcher.json b/docs/switcher.json index a4e6748ebea7..5cfbe4c05516 100644 --- a/docs/switcher.json +++ b/docs/switcher.json @@ -41,7 +41,12 @@ { "name": "0.4.0.dev6", "version": "0.4.0.dev6", - "url": "/autogen/0.4.0.dev6/", + "url": "/autogen/0.4.0.dev6/" + }, + { + "name": "0.4.0.dev7", + "version": "0.4.0.dev7", + "url": "/autogen/0.4.0.dev7/", "preferred": true } ] diff --git a/dotnet/.gitignore b/dotnet/.gitignore index 2fc32d9ac7e4..62205af71a07 100644 --- a/dotnet/.gitignore +++ b/dotnet/.gitignore @@ -82,6 +82,7 @@ BenchmarkDotNet.Artifacts/ project.lock.json project.fragment.lock.json artifacts/ +appsettings.Development.json # Tye .tye/ diff --git a/dotnet/.tools/test-aot-compatibility.ps1 b/dotnet/.tools/test-aot-compatibility.ps1 index 071edcd956dc..d70139fd1fd2 100644 --- a/dotnet/.tools/test-aot-compatibility.ps1 +++ b/dotnet/.tools/test-aot-compatibility.ps1 @@ -15,7 +15,7 @@ foreach ($line in $($publishOutput -split "`r`n")) } } -pushd $rootDirectory/test/AutoGen.AotCompatibility.Tests/bin/Release/$targetNetFramework/linux-x64 +pushd $rootDirectory/artifacts/bin/AutoGen.AotCompatibility.Tests/release Write-Host "Executing test App..." 
./AutoGen.AotCompatibility.Tests diff --git a/dotnet/AutoGen.sln b/dotnet/AutoGen.sln index e3e64621533d..fafd117f7914 100644 --- a/dotnet/AutoGen.sln +++ b/dotnet/AutoGen.sln @@ -122,15 +122,15 @@ Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "HelloAIAgents", "samples\He EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "HelloAgent", "samples\Hello\HelloAgent\HelloAgent.csproj", "{8F7560CF-EEBB-4333-A69F-838CA40FD85D}" EndProject -Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "AIModelClientHostingExtensions", "src\Microsoft.AutoGen\Extensions\AIModelClientHostingExtensions\AIModelClientHostingExtensions.csproj", "{97550E87-48C6-4EBF-85E1-413ABAE9DBFD}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.AutoGen.Extensions.MEAI", "src\Microsoft.AutoGen\Extensions\MEAI\Microsoft.AutoGen.Extensions.MEAI.csproj", "{97550E87-48C6-4EBF-85E1-413ABAE9DBFD}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "sample", "sample", "{686480D7-8FEC-4ED3-9C5D-CEBE1057A7ED}" EndProject Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "HelloAgentState", "samples\Hello\HelloAgentState\HelloAgentState.csproj", "{64EF61E7-00A6-4E5E-9808-62E10993A0E5}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Microsoft.AutoGen.ServiceDefaults", "src\Microsoft.AutoGen\Extensions\ServiceDefaults\Microsoft.AutoGen.ServiceDefaults.csproj", "{65059914-5527-4A00-9308-9FAF23D5E85A}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.AutoGen.Extensions.Aspire", "src\Microsoft.AutoGen\Extensions\Aspire\Microsoft.AutoGen.Extensions.Aspire.csproj", "{65059914-5527-4A00-9308-9FAF23D5E85A}" EndProject -Project("{FAE04EC0-301F-11D3-BF4B-00C04F79EFBC}") = "Microsoft.AutoGen.Agents.Tests", "test\Microsoft.AutoGen.Agents.Tests\Microsoft.AutoGen.Agents.Tests.csproj", "{394FDAF8-74F9-4977-94A5-3371737EB774}" +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "Microsoft.AutoGen.Agents.Tests", "test\Microsoft.AutoGen.Agents.Tests\Microsoft.AutoGen.Agents.Tests.csproj", "{394FDAF8-74F9-4977-94A5-3371737EB774}" EndProject Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Hello-distributed", "Hello-distributed", "{80A6C47B-0567-49F6-843A-667B96FA4BB4}" EndProject diff --git a/dotnet/AutoGen.v3.ncrunchsolution b/dotnet/AutoGen.v3.ncrunchsolution new file mode 100644 index 000000000000..13107d39442c --- /dev/null +++ b/dotnet/AutoGen.v3.ncrunchsolution @@ -0,0 +1,8 @@ + + + True + True + True + True + + \ No newline at end of file diff --git a/dotnet/Directory.Build.props b/dotnet/Directory.Build.props index 1e84f78232ad..e548a4b7ea83 100644 --- a/dotnet/Directory.Build.props +++ b/dotnet/Directory.Build.props @@ -3,6 +3,7 @@ + true netstandard2.0;net8.0 net8.0 preview @@ -20,7 +21,6 @@ true true false - embedded true @@ -29,6 +29,11 @@ $(MSBuildThisFileDirectory) + + $(VersionPrefixForAutoGen0_2) + true + + $(NoWarn);CA1829 diff --git a/dotnet/Directory.Packages.props b/dotnet/Directory.Packages.props index 2cde62118bb3..3793e34573a2 100644 --- a/dotnet/Directory.Packages.props +++ b/dotnet/Directory.Packages.props @@ -7,6 +7,7 @@ direct + @@ -16,12 +17,12 @@ - + - + @@ -32,91 +33,92 @@ - + - + - - - + + - + - + - - - - - - - - - - - - - + + + + + + + + + + + + + runtime; build; native; contentfiles; analyzers; buildtransitive all - - - - - - - - - - - - + + + + + + + + + + + + - + - - - + + + - - + + - - - - - + + + + + - - - - + + + + + - - + + + - + diff --git a/dotnet/PACKAGING.md b/dotnet/PACKAGING.md new file mode 100644 index 
000000000000..af03850f7cea --- /dev/null +++ b/dotnet/PACKAGING.md @@ -0,0 +1,41 @@ +# Packaging AutoGen.NET + +This document describes the steps to pack the `AutoGen.NET` project. + +## Prerequisites + +- .NET SDK + +## Create Package + +1. **Restore and Build the Project** +```bash +dotnet restore +dotnet build --configuration Release --no-restore +``` + + +2. **Create the NuGet Package** +```bash +dotnet pack --configuration Release --no-build +``` + +This will generate both the `.nupkg` file and the `.snupkg` file in the `./artifacts/package/release` directory. + +For more details, refer to the [official .NET documentation](https://docs.microsoft.com/en-us/dotnet/core/tools/dotnet-pack). + +## Add a new project to the package list +By default, when you add a new project to `AutoGen.sln`, it will not be included in the package list. To include the new project in the package, you need to add the following line to the new project's `.csproj` file + +e.g. + +```xml +<Import Project="$(RepoRoot)/nuget/nuget-package.props" /> +``` + +The `nuget-package.props` file sets `IsPackable` to `true` for the project; it also sets the necessary metadata for the package. + +For more details, refer to the [NuGet folder](./nuget/README.md). + +## Package versioning +The version of the package is defined by `VersionPrefix` and `VersionPrefixForAutoGen0_2` in [MetaInfo.props](./eng/MetaInfo.props). If the name of your project starts with `AutoGen.`, the version will be set to `VersionPrefixForAutoGen0_2`, otherwise it will be set to `VersionPrefix`. diff --git a/dotnet/README.md b/dotnet/README.md index a5705114d72a..c34679389fc0 100644 --- a/dotnet/README.md +++ b/dotnet/README.md @@ -1,12 +1,12 @@ # AutoGen for .NET There are two sets of packages here: -Autogen.\* the older packages derived from Autogen 0.2 for .NET - these will gradually be deprecated and ported into the new packages +AutoGen.\* the older packages derived from AutoGen 0.2 for .NET - these will gradually be deprecated and ported into the new packages Microsoft.AutoGen.* the new packages for .NET that use the event-driven model - These APIs are not yet stable and are subject to change. To get started with the new packages, please see the [samples](./samples/) and in particular the [Hello](./samples/Hello) sample. -The remaining content is for the older Autogen.* packages. +You can install both new and old packages from the following feeds: [![dotnet-ci](https://github.com/microsoft/autogen/actions/workflows/dotnet-build.yml/badge.svg)](https://github.com/microsoft/autogen/actions/workflows/dotnet-build.yml) [![NuGet version](https://badge.fury.io/nu/AutoGen.Core.svg)](https://badge.fury.io/nu/AutoGen.Core) @@ -14,9 +14,7 @@
> [!NOTE] > Nightly build is available at: > -> - ![Static Badge](https://img.shields.io/badge/public-blue?style=flat) ![Static Badge](https://img.shields.io/badge/nightly-yellow?style=flat) ![Static Badge](https://img.shields.io/badge/github-grey?style=flat): -> - ![Static Badge](https://img.shields.io/badge/public-blue?style=flat) ![Static Badge](https://img.shields.io/badge/nightly-yellow?style=flat) ![Static Badge](https://img.shields.io/badge/myget-grey?style=flat): -> - ![Static Badge](https://img.shields.io/badge/internal-blue?style=flat) ![Static Badge](https://img.shields.io/badge/nightly-yellow?style=flat) ![Static Badge](https://img.shields.io/badge/azure_devops-grey?style=flat) : +> - [![Static Badge](https://img.shields.io/badge/azure_devops-grey?style=flat)](https://dev.azure.com/AGPublish/AGPublic/_artifacts/feed/AutoGen-Nightly) : First, follow the [installation guide](./website/articles/Installation.md) to install AutoGen packages. diff --git a/dotnet/eng/MetaInfo.props b/dotnet/eng/MetaInfo.props index 4f3d216aa08c..db46778a06c2 100644 --- a/dotnet/eng/MetaInfo.props +++ b/dotnet/eng/MetaInfo.props @@ -1,7 +1,8 @@ -<VersionPrefix>0.2.2</VersionPrefix> +<VersionPrefix>0.4.0</VersionPrefix> +<VersionPrefixForAutoGen0_2>0.2.2</VersionPrefixForAutoGen0_2> AutoGen https://microsoft.github.io/autogen-for-net/ https://github.com/microsoft/autogen diff --git a/dotnet/global.json b/dotnet/global.json index 5f78cce063fa..4f9e9b79a15a 100644 --- a/dotnet/global.json +++ b/dotnet/global.json @@ -1,6 +1,6 @@ { "sdk": { - "version": "8.0.104", + "version": "8.0.401", "rollForward": "latestMinor" } } diff --git a/dotnet/nuget/README.md b/dotnet/nuget/README.md new file mode 100644 index 000000000000..c95a97624788 --- /dev/null +++ b/dotnet/nuget/README.md @@ -0,0 +1,13 @@ +# NuGet Directory + +This directory contains resources and metadata for packaging the AutoGen.NET SDK as a NuGet package. + +## Files + +- **icon.png**: The icon used for the NuGet package. +- **NUGET.md**: The readme file displayed on the NuGet package page. +- **nuget-package.props**: The MSBuild properties file that defines the packaging settings for the NuGet package. + +## Purpose + +The files in this directory are used to configure and build the NuGet package for the AutoGen.NET SDK, ensuring that it includes necessary metadata, documentation, and resources.
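+ +## Example + +A minimal sketch of a project file that consumes these assets by importing `nuget-package.props` (the `$(RepoRoot)` property and the import path are assumptions here; adjust them to where the project actually sits in the repo): + +```xml +<Project Sdk="Microsoft.NET.Sdk"> + <!-- Assumed path: importing nuget-package.props marks the project as packable and applies the shared package metadata --> + <Import Project="$(RepoRoot)/nuget/nuget-package.props" /> + <PropertyGroup> + <TargetFramework>net8.0</TargetFramework> + </PropertyGroup> +</Project> +```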
\ No newline at end of file diff --git a/dotnet/nuget/nuget-package.props b/dotnet/nuget/nuget-package.props index 50cbafc73b66..380380794dc9 100644 --- a/dotnet/nuget/nuget-package.props +++ b/dotnet/nuget/nuget-package.props @@ -7,7 +7,7 @@ Microsoft AutoGen A programming framework for agentic AI - AI, Artificial Intelligence, SDK + AI, Artificial Intelligence, Agents, Multiagent, SDK $(AssemblyName) diff --git a/dotnet/samples/Hello-distributed/Backend/Agents/OutputAgent.cs b/dotnet/samples/Hello-distributed/Backend/Agents/OutputAgent.cs index 1771793a58a7..5c109519dd5a 100644 --- a/dotnet/samples/Hello-distributed/Backend/Agents/OutputAgent.cs +++ b/dotnet/samples/Hello-distributed/Backend/Agents/OutputAgent.cs @@ -16,6 +16,6 @@ public class OutputAgent( { public async Task Handle(NewGreetingGenerated item) { - _logger.LogInformation($"OutputAgent with Id: {AgentId} received NewGreetingGenerated with {item.UserMessage}"); + _logger.LogInformation($"OutputAgent with Id: {AgentId} received NewGreetingGenerated with {item.UserMessage}"); } } diff --git a/dotnet/samples/Hello-distributed/Backend/Backend.csproj b/dotnet/samples/Hello-distributed/Backend/Backend.csproj index d38faf01497a..3db680932b07 100644 --- a/dotnet/samples/Hello-distributed/Backend/Backend.csproj +++ b/dotnet/samples/Hello-distributed/Backend/Backend.csproj @@ -17,17 +17,18 @@ + + - - + diff --git a/dotnet/samples/Hello-distributed/Backend/Program.cs b/dotnet/samples/Hello-distributed/Backend/Program.cs index f94daae5eeb2..162f835a0be2 100644 --- a/dotnet/samples/Hello-distributed/Backend/Program.cs +++ b/dotnet/samples/Hello-distributed/Backend/Program.cs @@ -28,7 +28,7 @@ app.MapDefaultEndpoints(); -app.MapPost("/sessions", async ([FromBody]string message, Client client) => +app.MapPost("/sessions", async ([FromBody] string message, Client client) => { var session = Guid.NewGuid().ToString(); await client.PublishEventAsync(new NewGreetingRequested { Message = message }.ToCloudEvent(session)); @@ -37,14 +37,14 @@ app.MapGet("/sessions/{session}", async (string session) => { - + return session; }); app.UseRouting() .UseEndpoints(endpoints => { - + }); ; app.UseSwagger(); diff --git a/dotnet/samples/Hello/Backend/Backend.csproj b/dotnet/samples/Hello/Backend/Backend.csproj index d502d7260d15..360459334805 100644 --- a/dotnet/samples/Hello/Backend/Backend.csproj +++ b/dotnet/samples/Hello/Backend/Backend.csproj @@ -1,4 +1,4 @@ - + diff --git a/dotnet/samples/Hello/Backend/Program.cs b/dotnet/samples/Hello/Backend/Program.cs index b913d39d643f..b74dba139826 100644 --- a/dotnet/samples/Hello/Backend/Program.cs +++ b/dotnet/samples/Hello/Backend/Program.cs @@ -1,7 +1,5 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// Program.cs -using Microsoft.Extensions.Hosting; - var app = await Microsoft.AutoGen.Agents.Host.StartAsync(local: false, useGrpc: true); await app.WaitForShutdownAsync(); diff --git a/dotnet/samples/Hello/Backend/appsettings.json b/dotnet/samples/Hello/Backend/appsettings.json index 3bb8d882550c..ae32fe371a70 100644 --- a/dotnet/samples/Hello/Backend/appsettings.json +++ b/dotnet/samples/Hello/Backend/appsettings.json @@ -1,9 +1,15 @@ { - "Logging": { - "LogLevel": { - "Default": "Warning", - "Microsoft": "Warning", - "Microsoft.Orleans": "Warning" - } + "Logging": { + "LogLevel": { + "Default": "Warning", + "Microsoft": "Warning", + "Microsoft.Orleans": "Warning" } - } \ No newline at end of file + }, + "AllowedHosts": "*", + "Kestrel": { + "EndpointDefaults": { + "Protocols": "Http2" + } + } +} diff --git a/dotnet/samples/Hello/Hello.AppHost/Program.cs b/dotnet/samples/Hello/Hello.AppHost/Program.cs index d9acc3ea3f12..326eddbcc9ec 100644 --- a/dotnet/samples/Hello/Hello.AppHost/Program.cs +++ b/dotnet/samples/Hello/Hello.AppHost/Program.cs @@ -1,7 +1,19 @@ // Copyright (c) Microsoft Corporation. All rights reserved. // Program.cs +using Microsoft.Extensions.Hosting; + var builder = DistributedApplication.CreateBuilder(args); -var backend = builder.AddProject("backend"); -builder.AddProject("client").WithReference(backend).WaitFor(backend); -builder.Build().Run(); +var backend = builder.AddProject("backend").WithExternalHttpEndpoints(); +builder.AddProject("client") + .WithReference(backend) + .WithEnvironment("AGENT_HOST", $"{backend.GetEndpoint("https").Property(EndpointProperty.Url)}") + .WaitFor(backend); + +using var app = builder.Build(); + +await app.StartAsync(); +var url = backend.GetEndpoint("http").Url; +Console.WriteLine("Backend URL: " + url); + +await app.WaitForShutdownAsync(); diff --git a/dotnet/samples/Hello/HelloAIAgents/HelloAIAgents.csproj b/dotnet/samples/Hello/HelloAIAgents/HelloAIAgents.csproj index f17ab0c9f0a5..c33bfeed5a8d 100644 --- a/dotnet/samples/Hello/HelloAIAgents/HelloAIAgents.csproj +++ b/dotnet/samples/Hello/HelloAIAgents/HelloAIAgents.csproj @@ -1,7 +1,7 @@ Exe - net8.0 + net8.0 enable enable @@ -13,6 +13,6 @@ - + diff --git a/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj b/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj index f2f3e473fef7..93c996e32093 100644 --- a/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj +++ b/dotnet/samples/Hello/HelloAgent/HelloAgent.csproj @@ -1,14 +1,17 @@ - + Exe - net8.0 + net8.0 enable enable - + + + PreserveNewest + + - diff --git a/dotnet/samples/Hello/HelloAgent/Program.cs b/dotnet/samples/Hello/HelloAgent/Program.cs index 506d91502328..4f74520a71e0 100644 --- a/dotnet/samples/Hello/HelloAgent/Program.cs +++ b/dotnet/samples/Hello/HelloAgent/Program.cs @@ -19,7 +19,7 @@ { Message = "World" }, local: true); - +//var app = await AgentsApp.StartAsync(); await app.WaitForShutdownAsync(); namespace Hello @@ -33,7 +33,8 @@ public class HelloAgent( ISayHello, IHandleConsole, IHandle, - IHandle + IHandle, + IHandle { public async Task Handle(NewMessageReceived item) { @@ -50,13 +51,14 @@ public async Task Handle(NewMessageReceived item) public async Task Handle(ConversationClosed item) { var goodbye = $"********************* {item.UserId} said {item.UserMessage} ************************"; - var evt = new Output - { - Message = goodbye - }; - await PublishMessageAsync(evt).ConfigureAwait(false); + var evt = new Output { Message = goodbye }; + await PublishMessageAsync(evt).ConfigureAwait(true); + await 
PublishMessageAsync(new Shutdown()).ConfigureAwait(false); + } - // Signal shutdown. + public async Task Handle(Shutdown item) + { + Console.WriteLine("Shutting down..."); hostApplicationLifetime.StopApplication(); } diff --git a/dotnet/samples/Hello/HelloAgentState/HelloAgentState.csproj b/dotnet/samples/Hello/HelloAgentState/HelloAgentState.csproj index 797fe957bb75..e26b6c9521c2 100644 --- a/dotnet/samples/Hello/HelloAgentState/HelloAgentState.csproj +++ b/dotnet/samples/Hello/HelloAgentState/HelloAgentState.csproj @@ -1,7 +1,7 @@ Exe - net8.0 + net8.0 enable enable diff --git a/dotnet/samples/Hello/HelloAgentState/Program.cs b/dotnet/samples/Hello/HelloAgentState/Program.cs index 7c15c4c54df0..664689de824d 100644 --- a/dotnet/samples/Hello/HelloAgentState/Program.cs +++ b/dotnet/samples/Hello/HelloAgentState/Program.cs @@ -9,7 +9,7 @@ var app = await AgentsApp.PublishMessageAsync("HelloAgents", new NewMessageReceived { Message = "World" -}, local: true); +}, local: false); await app.WaitForShutdownAsync(); diff --git a/dotnet/samples/dev-team/DevTeam.AgentHost/DevTeam.AgentHost.csproj b/dotnet/samples/dev-team/DevTeam.AgentHost/DevTeam.AgentHost.csproj index d8d7ebf8e48b..7508ae5af56e 100644 --- a/dotnet/samples/dev-team/DevTeam.AgentHost/DevTeam.AgentHost.csproj +++ b/dotnet/samples/dev-team/DevTeam.AgentHost/DevTeam.AgentHost.csproj @@ -1,7 +1,7 @@ - net8.0 + net8.0 enable enable @@ -10,7 +10,7 @@ - + diff --git a/dotnet/samples/dev-team/DevTeam.Agents/DevTeam.Agents.csproj b/dotnet/samples/dev-team/DevTeam.Agents/DevTeam.Agents.csproj index 8dfd6912e547..46a20c650fb7 100644 --- a/dotnet/samples/dev-team/DevTeam.Agents/DevTeam.Agents.csproj +++ b/dotnet/samples/dev-team/DevTeam.Agents/DevTeam.Agents.csproj @@ -1,7 +1,7 @@ - net8.0 + net8.0 enable enable @@ -10,7 +10,7 @@ - + diff --git a/dotnet/samples/dev-team/DevTeam.AppHost/DevTeam.AppHost.csproj b/dotnet/samples/dev-team/DevTeam.AppHost/DevTeam.AppHost.csproj index 7dc1a6deafd9..89d121b303ea 100644 --- a/dotnet/samples/dev-team/DevTeam.AppHost/DevTeam.AppHost.csproj +++ b/dotnet/samples/dev-team/DevTeam.AppHost/DevTeam.AppHost.csproj @@ -4,7 +4,7 @@ Exe - net8.0 + net8.0 enable enable true diff --git a/dotnet/samples/dev-team/DevTeam.Backend/DevTeam.Backend.csproj b/dotnet/samples/dev-team/DevTeam.Backend/DevTeam.Backend.csproj index 8296f7aa670e..10e05cfb2107 100644 --- a/dotnet/samples/dev-team/DevTeam.Backend/DevTeam.Backend.csproj +++ b/dotnet/samples/dev-team/DevTeam.Backend/DevTeam.Backend.csproj @@ -5,7 +5,7 @@ - net8.0 + net8.0 enable enable @@ -29,7 +29,7 @@ - + diff --git a/dotnet/samples/dev-team/DevTeam.Backend/Program.cs b/dotnet/samples/dev-team/DevTeam.Backend/Program.cs index 08f3c3a8a7c1..2cadfe01e933 100644 --- a/dotnet/samples/dev-team/DevTeam.Backend/Program.cs +++ b/dotnet/samples/dev-team/DevTeam.Backend/Program.cs @@ -67,9 +67,9 @@ }); ; app.UseSwagger(); -app.UseSwaggerUI(c => +/* app.UseSwaggerUI(c => { c.SwaggerEndpoint("/swagger/v1/swagger.json", "My API V1"); -}); +}); */ app.Run(); diff --git a/dotnet/samples/dev-team/DevTeam.Shared/DevTeam.Shared.csproj b/dotnet/samples/dev-team/DevTeam.Shared/DevTeam.Shared.csproj index bc739135da95..18fcb9745238 100644 --- a/dotnet/samples/dev-team/DevTeam.Shared/DevTeam.Shared.csproj +++ b/dotnet/samples/dev-team/DevTeam.Shared/DevTeam.Shared.csproj @@ -5,7 +5,7 @@ - net8.0 + net8.0 enable enable diff --git a/dotnet/samples/dev-team/dev team.sln b/dotnet/samples/dev-team/dev team.sln new file mode 100644 index 000000000000..f8a7aeacd924 --- /dev/null +++ 
b/dotnet/samples/dev-team/dev team.sln @@ -0,0 +1,49 @@ + +Microsoft Visual Studio Solution File, Format Version 12.00 +# Visual Studio Version 17 +VisualStudioVersion = 17.11.35327.3 +MinimumVisualStudioVersion = 10.0.40219.1 +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.AgentHost", "DevTeam.AgentHost\DevTeam.AgentHost.csproj", "{A6FC8B01-A177-4690-BD16-73EE3D0C06A0}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Backend", "DevTeam.Backend\DevTeam.Backend.csproj", "{2D4BAD10-85F3-4E4B-B759-13449A212A96}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Agents", "DevTeam.Agents\DevTeam.Agents.csproj", "{A51CE540-72B0-4271-B63D-A30CAB61C227}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.AppHost", "DevTeam.AppHost\DevTeam.AppHost.csproj", "{2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}" +EndProject +Project("{9A19103F-16F7-4668-BE54-9A1E7A4F7556}") = "DevTeam.Shared", "DevTeam.Shared\DevTeam.Shared.csproj", "{557701A5-35D8-4CE3-BA75-D5412B4227F5}" +EndProject +Global + GlobalSection(SolutionConfigurationPlatforms) = preSolution + Debug|Any CPU = Debug|Any CPU + Release|Any CPU = Release|Any CPU + EndGlobalSection + GlobalSection(ProjectConfigurationPlatforms) = postSolution + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A6FC8B01-A177-4690-BD16-73EE3D0C06A0}.Release|Any CPU.Build.0 = Release|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2D4BAD10-85F3-4E4B-B759-13449A212A96}.Release|Any CPU.Build.0 = Release|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Debug|Any CPU.Build.0 = Debug|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Release|Any CPU.ActiveCfg = Release|Any CPU + {A51CE540-72B0-4271-B63D-A30CAB61C227}.Release|Any CPU.Build.0 = Release|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Debug|Any CPU.Build.0 = Debug|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Release|Any CPU.ActiveCfg = Release|Any CPU + {2B8A3C64-9F4E-4CC5-9466-AFFD8E676D2E}.Release|Any CPU.Build.0 = Release|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Debug|Any CPU.ActiveCfg = Debug|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Debug|Any CPU.Build.0 = Debug|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Release|Any CPU.ActiveCfg = Release|Any CPU + {557701A5-35D8-4CE3-BA75-D5412B4227F5}.Release|Any CPU.Build.0 = Release|Any CPU + EndGlobalSection + GlobalSection(SolutionProperties) = preSolution + HideSolutionNode = FALSE + EndGlobalSection + GlobalSection(ExtensibilityGlobals) = postSolution + SolutionGuid = {DE04DB59-B8CD-4305-875B-E71442345CCF} + EndGlobalSection +EndGlobal diff --git a/dotnet/src/AutoGen.Core/Function/FunctionAttribute.cs b/dotnet/src/AutoGen.Core/Function/FunctionAttribute.cs index 9418dc7fd6ae..9367f5c6f297 100644 --- a/dotnet/src/AutoGen.Core/Function/FunctionAttribute.cs +++ b/dotnet/src/AutoGen.Core/Function/FunctionAttribute.cs @@ -98,7 +98,7 @@ public static implicit operator AIFunctionMetadata(FunctionContract contract) 
[NamespaceKey] = contract.Namespace, [ClassNameKey] = contract.ClassName, }, - Parameters = [.. contract.Parameters?.Select(p => (AIFunctionParameterMetadata)p)], + Parameters = [.. contract.Parameters?.Select(p => (AIFunctionParameterMetadata)p)!], }; } } diff --git a/dotnet/src/AutoGen.Core/Message/ImageMessage.cs b/dotnet/src/AutoGen.Core/Message/ImageMessage.cs index 369a3782d502..37be3a7c7ed1 100644 --- a/dotnet/src/AutoGen.Core/Message/ImageMessage.cs +++ b/dotnet/src/AutoGen.Core/Message/ImageMessage.cs @@ -2,39 +2,65 @@ // ImageMessage.cs using System; +using System.Text.RegularExpressions; namespace AutoGen.Core; public class ImageMessage : IMessage { - public ImageMessage(Role role, string url, string? from = null, string? mimeType = null) - : this(role, new Uri(url), from, mimeType) - { - } + private static readonly Regex s_DataUriRegex = new Regex(@"^data:(?<mediatype>[^;]+);base64,(?<data>.*)$", RegexOptions.Compiled); - public ImageMessage(Role role, Uri uri, string? from = null, string? mimeType = null) + /// <summary> + /// Create an ImageMessage from a url. + /// The url can be a regular url or a data uri. + /// If the url is a data uri, the scheme must be "data" and the format must be data:[<mediatype>][;base64],<data> + /// </summary> + public ImageMessage(Role role, string url, string? from = null, string? mimeType = null) { this.Role = role; this.From = from; - this.Url = uri.ToString(); - // try infer mimeType from uri extension if not provided - if (mimeType is null) + // url might be a data uri or a regular url + if (url.StartsWith("data:", StringComparison.OrdinalIgnoreCase)) + { + // the url must be in the format of data:[<mediatype>][;base64],<data> + var match = s_DataUriRegex.Match(url); + + if (!match.Success) + { + throw new ArgumentException("Invalid DataUri format, expected data:[<mediatype>][;base64],<data>", nameof(url)); + } + + this.Data = new BinaryData(Convert.FromBase64String(match.Groups["data"].Value), match.Groups["mediatype"].Value); + + this.MimeType = match.Groups["mediatype"].Value; + } + else { - mimeType = uri switch + this.Url = url; + // try infer mimeType from uri extension if not provided + if (mimeType is null) { - _ when uri.AbsoluteUri.EndsWith(".png", StringComparison.OrdinalIgnoreCase) => "image/png", - _ when uri.AbsoluteUri.EndsWith(".jpg", StringComparison.OrdinalIgnoreCase) => "image/jpeg", - _ when uri.AbsoluteUri.EndsWith(".jpeg", StringComparison.OrdinalIgnoreCase) => "image/jpeg", - _ when uri.AbsoluteUri.EndsWith(".gif", StringComparison.OrdinalIgnoreCase) => "image/gif", - _ when uri.AbsoluteUri.EndsWith(".bmp", StringComparison.OrdinalIgnoreCase) => "image/bmp", - _ when uri.AbsoluteUri.EndsWith(".webp", StringComparison.OrdinalIgnoreCase) => "image/webp", - _ when uri.AbsoluteUri.EndsWith(".svg", StringComparison.OrdinalIgnoreCase) => "image/svg+xml", - _ => throw new ArgumentException("MimeType is required for ImageMessage", nameof(mimeType)) - }; + mimeType = url switch + { + _ when url.EndsWith(".png", StringComparison.OrdinalIgnoreCase) => "image/png", + _ when url.EndsWith(".jpg", StringComparison.OrdinalIgnoreCase) => "image/jpeg", + _ when url.EndsWith(".jpeg", StringComparison.OrdinalIgnoreCase) => "image/jpeg", + _ when url.EndsWith(".gif", StringComparison.OrdinalIgnoreCase) => "image/gif", + _ when url.EndsWith(".bmp", StringComparison.OrdinalIgnoreCase) => "image/bmp", + _ when url.EndsWith(".webp", StringComparison.OrdinalIgnoreCase) => "image/webp", + _ when url.EndsWith(".svg", StringComparison.OrdinalIgnoreCase) => "image/svg+xml", + _ => throw new ArgumentException("MimeType is required for 
ImageMessage", nameof(mimeType)) + }; + } + + this.MimeType = mimeType; } + } - this.MimeType = mimeType; + public ImageMessage(Role role, Uri uri, string? from = null, string? mimeType = null) + : this(role, uri.ToString(), from, mimeType) + { } public ImageMessage(Role role, BinaryData data, string? from = null) diff --git a/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj index 7f00b63be86c..70c0f2b0d1ce 100644 --- a/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj +++ b/dotnet/src/AutoGen.OpenAI/AutoGen.OpenAI.csproj @@ -18,6 +18,7 @@ + diff --git a/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs b/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs new file mode 100644 index 000000000000..f088e1748e66 --- /dev/null +++ b/dotnet/src/AutoGen.OpenAI/Orchestrator/RolePlayToolCallOrchestrator.cs @@ -0,0 +1,133 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// RolePlayToolCallOrchestrator.cs + +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading; +using System.Threading.Tasks; +using AutoGen.OpenAI.Extension; +using OpenAI.Chat; + +namespace AutoGen.OpenAI.Orchestrator; + +/// +/// Orchestrating group chat using role play tool call +/// +public partial class RolePlayToolCallOrchestrator : IOrchestrator +{ + public readonly ChatClient chatClient; + private readonly Graph? workflow; + + public RolePlayToolCallOrchestrator(ChatClient chatClient, Graph? workflow = null) + { + this.chatClient = chatClient; + this.workflow = workflow; + } + + public async Task GetNextSpeakerAsync( + OrchestrationContext context, + CancellationToken cancellationToken = default) + { + var candidates = context.Candidates.ToList(); + + if (candidates.Count == 0) + { + return null; + } + + if (candidates.Count == 1) + { + return candidates.First(); + } + + // if there's a workflow + // and the next available agent from the workflow is in the group chat + // then return the next agent from the workflow + if (this.workflow != null) + { + var lastMessage = context.ChatHistory.LastOrDefault(); + if (lastMessage == null) + { + return null; + } + var currentSpeaker = candidates.First(candidates => candidates.Name == lastMessage.From); + var nextAgents = await this.workflow.TransitToNextAvailableAgentsAsync(currentSpeaker, context.ChatHistory, cancellationToken); + nextAgents = nextAgents.Where(nextAgent => candidates.Any(candidate => candidate.Name == nextAgent.Name)); + candidates = nextAgents.ToList(); + if (!candidates.Any()) + { + return null; + } + + if (candidates is { Count: 1 }) + { + return candidates.First(); + } + } + + // In this case, since there are more than one available agents from the workflow for the next speaker + // We need to invoke LLM to select the next speaker via select next speaker function + + var chatHistoryStringBuilder = new StringBuilder(); + foreach (var message in context.ChatHistory) + { + var chatHistoryPrompt = $"{message.From}: {message.GetContent()}"; + + chatHistoryStringBuilder.AppendLine(chatHistoryPrompt); + } + + var chatHistory = chatHistoryStringBuilder.ToString(); + + var prompt = $""" + # Task: Select the next speaker + + You are in a role-play game. Carefully read the conversation history and select the next speaker from the available roles. 
+ var prompt = $"""
+ # Task: Select the next speaker
+
+ You are in a role-play game. Carefully read the conversation history and select the next speaker from the available roles.
+
+ # Conversation
+ {chatHistory}
+
+ # Available roles
+ - {string.Join(",", candidates.Select(candidate => candidate.Name))}
+
+ Select the next speaker from the available roles and provide a reason for your selection.
+ """;
+
+ // enforce the next speaker to be selected by the LLM
+ var option = new ChatCompletionOptions
+ {
+ ToolChoice = ChatToolChoice.CreateFunctionChoice(this.SelectNextSpeakerFunctionContract.Name),
+ };
+
+ option.Tools.Add(this.SelectNextSpeakerFunctionContract.ToChatTool());
+ var toolCallMiddleware = new FunctionCallMiddleware(
+ functions: [this.SelectNextSpeakerFunctionContract],
+ functionMap: new Dictionary<string, Func<string, Task<string>>>
+ {
+ [this.SelectNextSpeakerFunctionContract.Name] = this.SelectNextSpeakerWrapper,
+ });
+
+ var selectAgent = new OpenAIChatAgent(
+ chatClient,
+ "admin",
+ option)
+ .RegisterMessageConnector()
+ .RegisterMiddleware(toolCallMiddleware);
+
+ var reply = await selectAgent.SendAsync(prompt);
+
+ var nextSpeaker = candidates.FirstOrDefault(candidate => candidate.Name == reply.GetContent());
+
+ return nextSpeaker;
+ }
+
+ /// <summary>
+ /// Select the next speaker by name and reason
+ /// </summary>
+ [Function]
+ public async Task<string> SelectNextSpeaker(string name, string reason)
+ {
+ return name;
+ }
+}
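// Usage sketch (illustrative; follows the pattern of the tests later in this diff, agent and client names are hypothetical):
//   var orchestrator = new RolePlayToolCallOrchestrator(chatClient, workflow);
//   var context = new OrchestrationContext { Candidates = [coder, reviewer, runner], ChatHistory = [...] };
//   var nextSpeaker = await orchestrator.GetNextSpeakerAsync(context);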
diff --git a/dotnet/src/AutoGen.SemanticKernel/Middleware/SemanticKernelChatMessageContentConnector.cs b/dotnet/src/AutoGen.SemanticKernel/Middleware/SemanticKernelChatMessageContentConnector.cs
index 073709ebad09..92947092ba28 100644
--- a/dotnet/src/AutoGen.SemanticKernel/Middleware/SemanticKernelChatMessageContentConnector.cs
+++ b/dotnet/src/AutoGen.SemanticKernel/Middleware/SemanticKernelChatMessageContentConnector.cs
@@ -181,7 +181,19 @@ private IEnumerable<ChatMessageContent> ProcessMessageForOthers(TextMessage message)
private IEnumerable<ChatMessageContent> ProcessMessageForOthers(ImageMessage message)
{
var collectionItems = new ChatMessageContentItemCollection();
- collectionItems.Add(new ImageContent(new Uri(message.Url ?? message.BuildDataUri())));
+ if (message.Url is not null)
+ {
+ collectionItems.Add(new ImageContent(new Uri(message.Url)));
+ }
+ else if (message.BuildDataUri() is string dataUri)
+ {
+ collectionItems.Add(new ImageContent(dataUri));
+ }
+ else
+ {
+ throw new InvalidOperationException("ImageMessage must have Url or DataUri");
+ }
+
return [new ChatMessageContent(AuthorRole.User, collectionItems)];
}
diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentBase.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentBase.cs
index 14c2688c236a..ee7b9e74583c 100644
--- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentBase.cs
+++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentBase.cs
@@ -19,4 +19,5 @@
Task<T> ReadAsync<T>(AgentId agentId, CancellationToken cancellationToken = default) where T : IMessage, new();
ValueTask PublishEventAsync(CloudEvent item, CancellationToken cancellationToken = default);
ValueTask PublishEventAsync(string topic, IMessage evt, CancellationToken cancellationToken = default);
+ List<string> Subscribe(string topic);
}
diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs
index 2125e57a8b96..6b3d4f98cdb2 100644
--- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs
+++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentRuntime.cs
@@ -13,9 +13,10 @@
ValueTask<AgentState> ReadAsync(AgentId agentId, CancellationToken cancellationToken = default);
ValueTask SendResponseAsync(RpcRequest request, RpcResponse response, CancellationToken cancellationToken = default);
ValueTask SendRequestAsync(IAgentBase agent, RpcRequest request, CancellationToken cancellationToken = default);
+ ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default);
ValueTask PublishEventAsync(CloudEvent @event, CancellationToken cancellationToken = default);
- void Update(Activity? activity, RpcRequest request);
- void Update(Activity? activity, CloudEvent cloudEvent);
- (string?, string?) GetTraceIDandState(IDictionary<string, string> metadata);
+ void Update(RpcRequest request, Activity? activity);
+ void Update(CloudEvent cloudEvent, Activity? activity);
+ (string?, string?) GetTraceIdAndState(IDictionary<string, string> metadata);
IDictionary<string, string> ExtractMetadata(IDictionary<string, string> metadata);
}
diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs
index 0a6784b54fd3..1b816b4ef3ad 100644
--- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs
+++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentState.cs
@@ -3,8 +3,24 @@
namespace Microsoft.AutoGen.Abstractions;
+/// <summary>
+/// Interface for managing the state of an agent.
+/// </summary>
public interface IAgentState
{
- ValueTask<AgentState> ReadStateAsync();
- ValueTask<string> WriteStateAsync(AgentState state, string eTag);
+ /// <summary>
+ /// Reads the current state of the agent asynchronously.
+ /// </summary>
+ /// <param name="cancellationToken">A token to cancel the operation.</param>
+ /// <returns>A task that represents the asynchronous read operation. The task result contains the current state of the agent.</returns>
+ ValueTask<AgentState> ReadStateAsync(CancellationToken cancellationToken = default);
+
+ /// <summary>
+ /// Writes the specified state of the agent asynchronously.
+ /// </summary>
+ /// <param name="state">The state to write.</param>
+ /// <param name="eTag">The ETag for concurrency control.</param>
+ /// <param name="cancellationToken">A token to cancel the operation.</param>
+ /// <returns>A task that represents the asynchronous write operation. The task result contains the ETag of the written state.</returns>
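+ /// <remarks>
+ /// Minimal usage sketch (caller shape assumed; <c>store</c> is a hypothetical <see cref="IAgentState"/> instance):
+ /// <code>
+ /// var current = await store.ReadStateAsync(ct);
+ /// // pass the eTag returned by the previous successful write for optimistic concurrency
+ /// var newETag = await store.WriteStateAsync(current, eTag, ct);
+ /// </code>
+ /// </remarks>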
+ ValueTask<string> WriteStateAsync(AgentState state, string eTag, CancellationToken cancellationToken = default);
}
diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentWorker.cs b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentWorker.cs
index 67a867d87dfa..adce9be60c9e 100644
--- a/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentWorker.cs
+++ b/dotnet/src/Microsoft.AutoGen/Abstractions/IAgentWorker.cs
@@ -8,6 +8,7 @@
ValueTask PublishEventAsync(CloudEvent evt, CancellationToken cancellationToken = default);
ValueTask SendRequestAsync(IAgentBase agent, RpcRequest request, CancellationToken cancellationToken = default);
ValueTask SendResponseAsync(RpcResponse response, CancellationToken cancellationToken = default);
+ ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default);
ValueTask StoreAsync(AgentState value, CancellationToken cancellationToken = default);
ValueTask<AgentState> ReadAsync(AgentId agentId, CancellationToken cancellationToken = default);
}
diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj b/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj
index e24b52187c82..39a90664057e 100644
--- a/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj
+++ b/dotnet/src/Microsoft.AutoGen/Abstractions/Microsoft.AutoGen.Abstractions.csproj
@@ -4,12 +4,10 @@
net8.0
enable
enable
- AutoGen.Core
- https://github.com/microsoft/agnext
- Microsoft
- AutoGenn Core Library
+
+
@@ -20,8 +18,6 @@
-
-
diff --git a/dotnet/src/Microsoft.AutoGen/AgentHost/Microsoft.AutoGen.AgentHost.csproj b/dotnet/src/Microsoft.AutoGen/AgentHost/Microsoft.AutoGen.AgentHost.csproj
index 254ff1a6c37f..0d3ecef5d5ba 100644
--- a/dotnet/src/Microsoft.AutoGen/AgentHost/Microsoft.AutoGen.AgentHost.csproj
+++ b/dotnet/src/Microsoft.AutoGen/AgentHost/Microsoft.AutoGen.AgentHost.csproj
@@ -12,7 +12,7 @@
-
+
diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs
index f73894978fc8..545734b50575 100644
--- a/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs
+++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentBase.cs
@@ -15,27 +15,40 @@
public abstract class AgentBase : IAgentBase, IHandle
{
public static readonly ActivitySource s_source = new("AutoGen.Agent");
- public AgentId AgentId => _context.AgentId;
+ public AgentId AgentId => _runtime.AgentId;
private readonly object _lock = new();
private readonly Dictionary<string, TaskCompletionSource<RpcResponse>> _pendingRequests = [];
private readonly Channel<Message> _mailbox = Channel.CreateUnbounded<Message>();
- private readonly IAgentRuntime _context;
+ private readonly IAgentRuntime _runtime;
public string Route { get; set; } = "base";
protected internal ILogger _logger;
- public IAgentRuntime Context => _context;
+ public IAgentRuntime Context => _runtime;
protected readonly EventTypes EventTypes;
protected AgentBase(
- IAgentRuntime context,
+ IAgentRuntime runtime,
EventTypes eventTypes,
ILogger? logger = null)
{
- _context = context;
- context.AgentInstance = this;
+ _runtime = runtime;
+ runtime.AgentInstance = this;
this.EventTypes = eventTypes;
_logger = logger ??
LoggerFactory.Create(builder => { }).CreateLogger(); + var subscriptionRequest = new AddSubscriptionRequest + { + RequestId = Guid.NewGuid().ToString(), + Subscription = new Subscription + { + TypeSubscription = new TypeSubscription + { + AgentType = this.AgentId.Type, + TopicType = this.AgentId.Type + "/" + this.AgentId.Key + } + } + }; + _runtime.SendMessageAsync(new Message { AddSubscriptionRequest = subscriptionRequest }).AsTask().Wait(); Completion = Start(); } internal Task Completion { get; } @@ -93,7 +106,7 @@ protected internal async Task HandleRpcMessage(Message msg, CancellationToken ca { var activity = this.ExtractActivity(msg.CloudEvent.Type, msg.CloudEvent.Metadata); await this.InvokeWithActivityAsync( - static ((AgentBase Agent, CloudEvent Item) state) => state.Agent.CallHandler(state.Item), + static ((AgentBase Agent, CloudEvent Item) state, CancellationToken _) => state.Agent.CallHandler(state.Item), (this, msg.CloudEvent), activity, msg.CloudEvent.Type, cancellationToken).ConfigureAwait(false); @@ -103,7 +116,7 @@ await this.InvokeWithActivityAsync( { var activity = this.ExtractActivity(msg.Request.Method, msg.Request.Metadata); await this.InvokeWithActivityAsync( - static ((AgentBase Agent, RpcRequest Request) state) => state.Agent.OnRequestCoreAsync(state.Request), + static ((AgentBase Agent, RpcRequest Request) state, CancellationToken ct) => state.Agent.OnRequestCoreAsync(state.Request, ct), (this, msg.Request), activity, msg.Request.Method, cancellationToken).ConfigureAwait(false); @@ -114,14 +127,35 @@ await this.InvokeWithActivityAsync( break; } } + public List Subscribe(string topic) + { + Message message = new() + { + AddSubscriptionRequest = new() + { + RequestId = Guid.NewGuid().ToString(), + Subscription = new Subscription + { + TypeSubscription = new TypeSubscription + { + TopicType = topic, + AgentType = this.AgentId.Key + } + } + } + }; + _runtime.SendMessageAsync(message).AsTask().Wait(); + + return new List { topic }; + } public async Task StoreAsync(AgentState state, CancellationToken cancellationToken = default) { - await _context.StoreAsync(state, cancellationToken).ConfigureAwait(false); + await _runtime.StoreAsync(state, cancellationToken).ConfigureAwait(false); return; } public async Task ReadAsync(AgentId agentId, CancellationToken cancellationToken = default) where T : IMessage, new() { - var agentstate = await _context.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); + var agentstate = await _runtime.ReadAsync(agentId, cancellationToken).ConfigureAwait(false); return agentstate.FromAgentState(); } private void OnResponseCore(RpcResponse response) @@ -150,7 +184,7 @@ private async Task OnRequestCoreAsync(RpcRequest request, CancellationToken canc { response = new RpcResponse { Error = ex.Message }; } - await _context.SendResponseAsync(request, response, cancellationToken).ConfigureAwait(false); + await _runtime.SendResponseAsync(request, response, cancellationToken).ConfigureAwait(false); } protected async Task RequestAsync(AgentId target, string method, Dictionary parameters) @@ -174,9 +208,9 @@ protected async Task RequestAsync(AgentId target, string method, Di activity?.SetTag("peer.service", target.ToString()); var completion = new TaskCompletionSource(TaskCreationOptions.RunContinuationsAsynchronously); - _context.Update(activity, request); + _runtime.Update(request, activity); await this.InvokeWithActivityAsync( - static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource) state) => + static async ((AgentBase Agent, 
RpcRequest Request, TaskCompletionSource) state, CancellationToken ct) => { var (self, request, completion) = state; @@ -185,7 +219,7 @@ static async ((AgentBase Agent, RpcRequest Request, TaskCompletionSource + static async ((AgentBase Agent, CloudEvent Event) state, CancellationToken ct) => { - await state.Agent._context.PublishEventAsync(state.Event).ConfigureAwait(false); + await state.Agent._runtime.PublishEventAsync(state.Event).ConfigureAwait(false); }, (this, item), activity, diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs index ce1318a0d332..5d738e5fc383 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentBaseExtensions.cs @@ -5,15 +5,25 @@ namespace Microsoft.AutoGen.Agents; +/// +/// Provides extension methods for the class. +/// public static class AgentBaseExtensions { + /// + /// Extracts an from the given agent and metadata. + /// + /// The agent from which to extract the activity. + /// The name of the activity. + /// The metadata containing trace information. + /// The extracted or null if extraction fails. public static Activity? ExtractActivity(this AgentBase agent, string activityName, IDictionary metadata) { Activity? activity; - (var traceParent, var traceState) = agent.Context.GetTraceIDandState(metadata); + var (traceParent, traceState) = agent.Context.GetTraceIdAndState(metadata); if (!string.IsNullOrEmpty(traceParent)) { - if (ActivityContext.TryParse(traceParent, traceState, isRemote: true, out ActivityContext parentContext)) + if (ActivityContext.TryParse(traceParent, traceState, isRemote: true, out var parentContext)) { // traceParent is a W3CId activity = AgentBase.s_source.CreateActivity(activityName, ActivityKind.Server, parentContext); @@ -33,12 +43,9 @@ public static class AgentBaseExtensions var baggage = agent.Context.ExtractMetadata(metadata); - if (baggage is not null) + foreach (var baggageItem in baggage) { - foreach (var baggageItem in baggage) - { - activity.AddBaggage(baggageItem.Key, baggageItem.Value); - } + activity.AddBaggage(baggageItem.Key, baggageItem.Value); } } } @@ -49,7 +56,19 @@ public static class AgentBaseExtensions return activity; } - public static async Task InvokeWithActivityAsync(this AgentBase agent, Func func, TState state, Activity? activity, string methodName, CancellationToken cancellationToken = default) + + /// + /// Invokes a function asynchronously within the context of an . + /// + /// The type of the state parameter. + /// The agent invoking the function. + /// The function to invoke. + /// The state parameter to pass to the function. + /// The activity within which to invoke the function. + /// The name of the method being invoked. + /// A token to monitor for cancellation requests. + /// A task representing the asynchronous operation. + public static async Task InvokeWithActivityAsync(this AgentBase agent, Func func, TState state, Activity? 
activity, string methodName, CancellationToken cancellationToken = default) { if (activity is not null && activity.StartTimeUtc == default) { @@ -63,7 +82,7 @@ public static async Task InvokeWithActivityAsync(this AgentBase agent, F try { - await func(state).ConfigureAwait(false); + await func(state, cancellationToken).ConfigureAwait(false); if (activity is not null && activity.IsAllDataRequested) { activity.SetStatus(ActivityStatusCode.Ok); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs b/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs index 86944cad3ab3..c36d456af32e 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/AgentRuntime.cs @@ -15,7 +15,7 @@ internal sealed class AgentRuntime(AgentId agentId, IAgentWorker worker, ILogger public ILogger Logger { get; } = logger; public IAgentBase? AgentInstance { get; set; } private DistributedContextPropagator DistributedContextPropagator { get; } = distributedContextPropagator; - public (string?, string?) GetTraceIDandState(IDictionary metadata) + public (string?, string?) GetTraceIdAndState(IDictionary metadata) { DistributedContextPropagator.ExtractTraceIdAndState(metadata, static (object? carrier, string fieldName, out string? fieldValue, out IEnumerable? fieldValues) => @@ -28,11 +28,11 @@ internal sealed class AgentRuntime(AgentId agentId, IAgentWorker worker, ILogger out var traceState); return (traceParent, traceState); } - public void Update(Activity? activity, RpcRequest request) + public void Update(RpcRequest request, Activity? activity = null) { DistributedContextPropagator.Inject(activity, request.Metadata, static (carrier, key, value) => ((IDictionary)carrier!)[key] = value); } - public void Update(Activity? activity, CloudEvent cloudEvent) + public void Update(CloudEvent cloudEvent, Activity? 
activity = null) { DistributedContextPropagator.Inject(activity, cloudEvent.Metadata, static (carrier, key, value) => ((IDictionary)carrier!)[key] = value); } @@ -45,6 +45,10 @@ public async ValueTask SendRequestAsync(IAgentBase agent, RpcRequest request, Ca { await worker.SendRequestAsync(agent, request, cancellationToken).ConfigureAwait(false); } + public async ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default) + { + await worker.SendMessageAsync(message, cancellationToken).ConfigureAwait(false); + } public async ValueTask PublishEventAsync(CloudEvent @event, CancellationToken cancellationToken = default) { await worker.PublishEventAsync(@event, cancellationToken).ConfigureAwait(false); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs b/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs index a0383a3c219d..bf68467e3fa7 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Agents/AIAgent/InferenceAgent.cs @@ -5,16 +5,14 @@ using Microsoft.AutoGen.Abstractions; using Microsoft.Extensions.AI; namespace Microsoft.AutoGen.Agents; -public abstract class InferenceAgent : AgentBase where T : IMessage, new() +public abstract class InferenceAgent( + IAgentRuntime context, + EventTypes typeRegistry, + IChatClient client) + : AgentBase(context, typeRegistry) + where T : IMessage, new() { - protected IChatClient ChatClient { get; } - public InferenceAgent( - IAgentRuntime context, - EventTypes typeRegistry, IChatClient client - ) : base(context, typeRegistry) - { - ChatClient = client; - } + protected IChatClient ChatClient { get; } = client; private Task CompleteAsync( IList chatMessages, diff --git a/dotnet/src/Microsoft.AutoGen/Agents/App.cs b/dotnet/src/Microsoft.AutoGen/Agents/App.cs index fc36d3367795..8a233bcd4898 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/App.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/App.cs @@ -12,6 +12,7 @@ public static class AgentsApp { // need a variable to store the runtime instance public static WebApplication? Host { get; private set; } + [MemberNotNull(nameof(Host))] public static async ValueTask StartAsync(WebApplicationBuilder? builder = null, AgentTypes? agentTypes = null, bool local = false) { @@ -58,7 +59,7 @@ public static async ValueTask ShutdownAsync() await Host.StopAsync(); } - private static AgentApplicationBuilder AddAgents(this AgentApplicationBuilder builder, AgentTypes? agentTypes) + private static IHostApplicationBuilder AddAgents(this IHostApplicationBuilder builder, AgentTypes? agentTypes) { agentTypes ??= AgentTypes.GetAgentTypesFromAssembly() ?? 
throw new InvalidOperationException("No agent types found in the assembly"); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Microsoft.AutoGen.Agents.csproj b/dotnet/src/Microsoft.AutoGen/Agents/Microsoft.AutoGen.Agents.csproj index 3bc2b3acb012..aa79cf9665ae 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Microsoft.AutoGen.Agents.csproj +++ b/dotnet/src/Microsoft.AutoGen/Agents/Microsoft.AutoGen.Agents.csproj @@ -4,16 +4,14 @@ net8.0 enable enable - Microsoft.AutoGen.Agents - https://github.com/microsoft/autogen - Microsoft - Micrososft AutoGen Agents SDK - ai-agents;event-driven-agents + + + - + diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs index 490051490315..f9a5050534c8 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorker.cs @@ -24,6 +24,8 @@ public class AgentWorker : private readonly CancellationTokenSource _shutdownCts; private readonly IServiceProvider _serviceProvider; private readonly IEnumerable> _configuredAgentTypes; + private readonly ConcurrentDictionary _subscriptionsByAgentType = new(); + private readonly ConcurrentDictionary> _subscriptionsByTopic = new(); private readonly DistributedContextPropagator _distributedContextPropagator; private readonly CancellationTokenSource _shutdownCancellationToken = new(); private Task? _mailboxTask; @@ -47,7 +49,7 @@ public async ValueTask PublishEventAsync(CloudEvent cloudEvent, CancellationToke { foreach (var (typeName, _) in _agentTypes) { - if (typeName == "Client") { continue; } + if (typeName == nameof(Client)) { continue; } var agent = GetOrActivateAgent(new AgentId(typeName, cloudEvent.Source)); agent.ReceiveMessage(new Message { CloudEvent = cloudEvent }); } @@ -63,6 +65,10 @@ public ValueTask SendResponseAsync(RpcResponse response, CancellationToken cance { return _mailbox.Writer.WriteAsync(new Message { Response = response }, cancellationToken); } + public ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default) + { + return _mailbox.Writer.WriteAsync(message, cancellationToken); + } public ValueTask StoreAsync(AgentState value, CancellationToken cancellationToken = default) { var agentId = value.AgentId ?? 
throw new InvalidOperationException("AgentId is required when saving AgentState."); @@ -92,7 +98,7 @@ public async Task RunMessagePump() if (message == null) { continue; } switch (message) { - case Message msg: + case Message msg when msg.CloudEvent != null: var item = msg.CloudEvent; @@ -102,6 +108,13 @@ public async Task RunMessagePump() agentToInvoke.ReceiveMessage(msg); } break; + case Message msg when msg.AddSubscriptionRequest != null: + await AddSubscriptionRequestAsync(msg.AddSubscriptionRequest).ConfigureAwait(true); + break; + case Message msg when msg.AddSubscriptionResponse != null: + break; + case Message msg when msg.RegisterAgentTypeResponse != null: + break; default: throw new InvalidOperationException($"Unexpected message '{message}'."); } @@ -115,6 +128,23 @@ public async Task RunMessagePump() } } } + private async ValueTask AddSubscriptionRequestAsync(AddSubscriptionRequest subscription) + { + var topic = subscription.Subscription.TypeSubscription.TopicType; + var agentType = subscription.Subscription.TypeSubscription.AgentType; + _subscriptionsByAgentType[agentType] = subscription.Subscription; + _subscriptionsByTopic.GetOrAdd(topic, _ => []).Add(agentType); + Message response = new() + { + AddSubscriptionResponse = new() + { + RequestId = subscription.RequestId, + Error = "", + Success = true + } + }; + await _mailbox.Writer.WriteAsync(response).ConfigureAwait(false); + } public async Task StartAsync(CancellationToken cancellationToken) { diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs index c1bf57e2df68..2b578d9bff2e 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/AgentWorkerHostingExtensions.cs @@ -41,6 +41,7 @@ public static WebApplicationBuilder AddLocalAgentService(this WebApplicationBuil { return builder.AddAgentService(local: true, useGrpc); } + public static WebApplication MapAgentService(this WebApplication app, bool local = false, bool useGrpc = true) { if (useGrpc) { app.MapGrpcService(); } diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorker.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorker.cs index 431a5629c142..48f07573430d 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorker.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorker.cs @@ -85,6 +85,13 @@ private async Task RunReadPump() } break; + case Message.MessageOneofCase.AddSubscriptionResponse: + if (!message.AddSubscriptionResponse.Success) + { + throw new InvalidOperationException($"Failed to add subscription: '{message.AddSubscriptionResponse.Error}'."); + } + break; + case Message.MessageOneofCase.CloudEvent: // HACK: Send the message to an instance of each agent type @@ -153,6 +160,13 @@ private async Task RunWritePump() item.WriteCompletionSource?.TrySetCanceled(); break; } + catch (RpcException ex) when (ex.StatusCode == StatusCode.Unavailable) + { + // we could not connect to the endpoint - most likely we have the wrong port or failed ssl + // we need to let the user know what port we tried to connect to and then do backoff and retry + _logger.LogError(ex, "Error connecting to GRPC endpoint {Endpoint}.", channel.ToString()); + break; + } catch (Exception ex) when (!_shutdownCts.IsCancellationRequested) { item.WriteCompletionSource?.TrySetException(ex); @@ -230,6 +244,11 @@ await 
WriteChannelAsync(new Message await WriteChannelAsync(new Message { Request = request }, cancellationToken).ConfigureAwait(false); } // new is intentional + public new async ValueTask SendMessageAsync(Message message, CancellationToken cancellationToken = default) + { + await WriteChannelAsync(message, cancellationToken).ConfigureAwait(false); + } + // new is intentional public new async ValueTask PublishEventAsync(CloudEvent @event, CancellationToken cancellationToken = default) { await WriteChannelAsync(new Message { CloudEvent = @event }, cancellationToken).ConfigureAwait(false); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorkerHostBuilderExtension.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorkerHostBuilderExtension.cs index 4c63601dfaa6..4f214caa8203 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorkerHostBuilderExtension.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcAgentWorkerHostBuilderExtension.cs @@ -11,12 +11,12 @@ namespace Microsoft.AutoGen.Agents; public static class GrpcAgentWorkerHostBuilderExtensions { - private const string _defaultAgentServiceAddress = "https://localhost:5001"; - public static IHostApplicationBuilder AddGrpcAgentWorker(this IHostApplicationBuilder builder, string agentServiceAddress = _defaultAgentServiceAddress) + private const string _defaultAgentServiceAddress = "https://localhost:53071"; + public static IHostApplicationBuilder AddGrpcAgentWorker(this IHostApplicationBuilder builder, string? agentServiceAddress = null) { builder.Services.AddGrpcClient(options => { - options.Address = new Uri(agentServiceAddress); + options.Address = new Uri(agentServiceAddress ?? builder.Configuration["AGENT_HOST"] ?? _defaultAgentServiceAddress); options.ChannelOptionsActions.Add(channelOptions => { diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs index 89e9c55c4648..ab24a0e15fe5 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Grpc/GrpcGateway.cs @@ -16,10 +16,13 @@ public sealed class GrpcGateway : BackgroundService, IGateway private readonly IClusterClient _clusterClient; private readonly ConcurrentDictionary _agentState = new(); private readonly IRegistryGrain _gatewayRegistry; + private readonly ISubscriptionsGrain _subscriptions; private readonly IGateway _reference; // The agents supported by each worker process. private readonly ConcurrentDictionary> _supportedAgentTypes = []; public readonly ConcurrentDictionary _workers = new(); + private readonly ConcurrentDictionary _subscriptionsByAgentType = new(); + private readonly ConcurrentDictionary> _subscriptionsByTopic = new(); // The mapping from agent id to worker process. 
private readonly ConcurrentDictionary<(string Type, string Key), GrpcWorkerConnection> _agentDirectory = new(); @@ -33,6 +36,7 @@ public GrpcGateway(IClusterClient clusterClient, ILogger logger) _clusterClient = clusterClient; _reference = clusterClient.CreateObjectReference(this); _gatewayRegistry = clusterClient.GetGrain(0); + _subscriptions = clusterClient.GetGrain(0); } public async ValueTask BroadcastEvent(CloudEvent evt) { @@ -102,16 +106,70 @@ internal async Task OnReceivedMessageAsync(GrpcWorkerConnection connection, Mess case Message.MessageOneofCase.RegisterAgentTypeRequest: await RegisterAgentTypeAsync(connection, message.RegisterAgentTypeRequest); break; + case Message.MessageOneofCase.AddSubscriptionRequest: + await AddSubscriptionAsync(connection, message.AddSubscriptionRequest); + break; default: - throw new InvalidOperationException($"Unknown message type for message '{message}'."); + // if it wasn't recognized return bad request + await RespondBadRequestAsync(connection, $"Unknown message type for message '{message}'."); + break; + }; + } + private async ValueTask RespondBadRequestAsync(GrpcWorkerConnection connection, string error) + { + throw new RpcException(new Status(StatusCode.InvalidArgument, error)); + } + private async ValueTask AddSubscriptionAsync(GrpcWorkerConnection connection, AddSubscriptionRequest request) + { + var topic = request.Subscription.TypeSubscription.TopicType; + var agentType = request.Subscription.TypeSubscription.AgentType; + _subscriptionsByAgentType[agentType] = request.Subscription; + _subscriptionsByTopic.GetOrAdd(topic, _ => []).Add(agentType); + await _subscriptions.Subscribe(topic, agentType); + //var response = new AddSubscriptionResponse { RequestId = request.RequestId, Error = "", Success = true }; + Message response = new() + { + AddSubscriptionResponse = new() + { + RequestId = request.RequestId, + Error = "", + Success = true + } }; + await connection.ResponseStream.WriteAsync(response).ConfigureAwait(false); } private async ValueTask RegisterAgentTypeAsync(GrpcWorkerConnection connection, RegisterAgentTypeRequest msg) { connection.AddSupportedType(msg.Type); _supportedAgentTypes.GetOrAdd(msg.Type, _ => []).Add(connection); - await _gatewayRegistry.RegisterAgentType(msg.Type, _reference); + await _gatewayRegistry.RegisterAgentType(msg.Type, _reference).ConfigureAwait(true); + Message response = new() + { + RegisterAgentTypeResponse = new() + { + RequestId = msg.RequestId, + Error = "", + Success = true + } + }; + // add a default subscription for the agent type + //TODO: we should consider having constraints on the namespace or at least migrate all our examples to use well typed namesspaces like com.microsoft.autogen/hello/HelloAgents etc + var subscriptionRequest = new AddSubscriptionRequest + { + RequestId = Guid.NewGuid().ToString(), + Subscription = new Subscription + { + TypeSubscription = new TypeSubscription + { + AgentType = msg.Type, + TopicType = msg.Type + } + } + }; + await AddSubscriptionAsync(connection, subscriptionRequest).ConfigureAwait(true); + + await connection.ResponseStream.WriteAsync(response).ConfigureAwait(false); } private async ValueTask DispatchEventAsync(CloudEvent evt) { diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Host.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Host.cs index 5b725af0c9a9..464536d54b21 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Host.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Host.cs @@ -14,11 +14,11 @@ public static async Task 
StartAsync(bool local = false, bool use builder.AddServiceDefaults(); if (local) { - builder.AddLocalAgentService(useGrpc); + builder.AddLocalAgentService(useGrpc: useGrpc); } else { - builder.AddAgentService(useGrpc); + builder.AddAgentService(useGrpc: useGrpc); } var app = builder.Build(); app.MapAgentService(local, useGrpc); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/HostBuilderExtensions.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/HostBuilderExtensions.cs index c3f8a1e38bfd..2b69a983340b 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/HostBuilderExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/HostBuilderExtensions.cs @@ -15,9 +15,25 @@ namespace Microsoft.AutoGen.Agents; public static class HostBuilderExtensions { - private const string _defaultAgentServiceAddress = "https://localhost:5001"; - public static AgentApplicationBuilder AddAgentWorker(this IHostApplicationBuilder builder, string agentServiceAddress = _defaultAgentServiceAddress, bool local = false) + private const string _defaultAgentServiceAddress = "https://localhost:53071"; + + public static IHostApplicationBuilder AddAgent< + [DynamicallyAccessedMembers(DynamicallyAccessedMemberTypes.PublicConstructors)] TAgent>(this IHostApplicationBuilder builder, string typeName) where TAgent : AgentBase + { + builder.Services.AddKeyedSingleton("AgentTypes", (sp, key) => Tuple.Create(typeName, typeof(TAgent))); + + return builder; + } + + public static IHostApplicationBuilder AddAgent(this IHostApplicationBuilder builder, string typeName, Type agentType) { + builder.Services.AddKeyedSingleton("AgentTypes", (sp, key) => Tuple.Create(typeName, agentType)); + return builder; + } + + public static IHostApplicationBuilder AddAgentWorker(this IHostApplicationBuilder builder, string? agentServiceAddress = null, bool local = false) + { + agentServiceAddress ??= builder.Configuration["AGENT_HOST"] ?? _defaultAgentServiceAddress; builder.Services.TryAddSingleton(DistributedContextPropagator.Current); // if !local, then add the gRPC client @@ -106,7 +122,9 @@ public static AgentApplicationBuilder AddAgentWorker(this IHostApplicationBuilde return new EventTypes(typeRegistry, types, eventsForType, typesForEvents); }); builder.Services.AddSingleton(); - return new AgentApplicationBuilder(builder); + builder.Services.AddSingleton(new AgentApplicationBuilder(builder)); + + return builder; } private static MessageDescriptor? GetMessageDescriptor(Type type) diff --git a/dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs similarity index 84% rename from dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs rename to dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs index 79b7b63e7235..539ec3eca435 100644 --- a/dotnet/src/Microsoft.AutoGen/Abstractions/IGateway.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/IGateway.cs @@ -1,7 +1,8 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// IGateway.cs
+using Microsoft.AutoGen.Abstractions;
-namespace Microsoft.AutoGen.Abstractions;
+namespace Microsoft.AutoGen.Agents;
public interface IGateway : IGrainObserver
{
diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs
index 50d8c3ad4542..9905f6aebac6 100644
--- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs
+++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/AgentStateGrain.cs
@@ -7,7 +7,8 @@
internal sealed class AgentStateGrain([PersistentState("state", "AgentStateStore")] IPersistentState<AgentState> state) : Grain, IAgentState
{
- public async ValueTask<string> WriteStateAsync(AgentState newState, string eTag)
+ /// <inheritdoc />
+ public async ValueTask<string> WriteStateAsync(AgentState newState, string eTag, CancellationToken cancellationToken = default)
{
// etags for optimistic concurrency control
// if the Etag is null, its a new state
@@ -27,7 +28,8 @@ public async ValueTask<string> WriteStateAsync(AgentState newState, string eTag)
return state.Etag;
}
- public ValueTask<AgentState> ReadStateAsync()
+ /// <inheritdoc />
+ public ValueTask<AgentState> ReadStateAsync(CancellationToken cancellationToken = default)
{
return ValueTask.FromResult(state.State);
}
diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/ISubscriptionsGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/ISubscriptionsGrain.cs
new file mode 100644
index 000000000000..302df9ebff98
--- /dev/null
+++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/ISubscriptionsGrain.cs
@@ -0,0 +1,10 @@
+// Copyright (c) Microsoft Corporation. All rights reserved.
+// ISubscriptionsGrain.cs
+
+namespace Microsoft.AutoGen.Agents;
+public interface ISubscriptionsGrain : IGrainWithIntegerKey
+{
+ ValueTask Subscribe(string agentType, string topic);
+ ValueTask Unsubscribe(string agentType, string topic);
+ ValueTask<Dictionary<string, List<string>>> GetSubscriptions(string agentType);
+}
diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/OrleansRuntimeHostingExtenions.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/OrleansRuntimeHostingExtenions.cs
index cd59bcefc385..374e49f7a500 100644
--- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/OrleansRuntimeHostingExtenions.cs
+++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/OrleansRuntimeHostingExtenions.cs
@@ -15,11 +15,17 @@ public static class OrleansRuntimeHostingExtenions
{
public static WebApplicationBuilder AddOrleans(this WebApplicationBuilder builder, bool local = false)
{
+ // forward to the IHostApplicationBuilder overload; the cast avoids re-binding to this same method
+ ((IHostApplicationBuilder)builder).AddOrleans(local);
+ return builder;
+ }
+ public static IHostApplicationBuilder AddOrleans(this IHostApplicationBuilder builder, bool local = false)
+ {
builder.Services.AddSerializer(serializer => serializer.AddProtobufSerializer());
+ builder.Services.AddSingleton();
+
+ // Ensure Orleans is added before the hosted service to guarantee that it starts first.
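+ // Usage sketch (hypothetical host wiring; Host.CreateApplicationBuilder is the standard generic-host entry point):
+ //   var hostBuilder = Host.CreateApplicationBuilder(args);
+ //   hostBuilder.AddOrleans(local: true); // development mode: in-memory clustering, storage and streams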
//TODO: make all of this configurable - builder.Host.UseOrleans(siloBuilder => + builder.UseOrleans((siloBuilder) => { // Development mode or local mode uses in-memory storage and streams if (builder.Environment.IsDevelopment() || local) @@ -51,16 +57,16 @@ public static WebApplicationBuilder AddOrleans(this WebApplicationBuilder builde options.SystemResponseTimeout = TimeSpan.FromMinutes(3); }); siloBuilder.Configure(options => - { - options.ResponseTimeout = TimeSpan.FromMinutes(3); - }); + { + options.ResponseTimeout = TimeSpan.FromMinutes(3); + }); siloBuilder.UseCosmosClustering(o => - { - o.ConfigureCosmosClient(cosmosDbconnectionString); - o.ContainerName = "AutoGen"; - o.DatabaseName = "clustering"; - o.IsResourceCreationEnabled = true; - }); + { + o.ConfigureCosmosClient(cosmosDbconnectionString); + o.ContainerName = "AutoGen"; + o.DatabaseName = "clustering"; + o.IsResourceCreationEnabled = true; + }); siloBuilder.UseCosmosReminderService(o => { @@ -84,7 +90,7 @@ public static WebApplicationBuilder AddOrleans(this WebApplicationBuilder builde .AddMemoryGrainStorage("PubSubStore"); } }); - builder.Services.AddSingleton(); + return builder; } } diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/RegistryGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/RegistryGrain.cs index c5114e3e7423..cb7523126436 100644 --- a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/RegistryGrain.cs +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/RegistryGrain.cs @@ -5,7 +5,7 @@ namespace Microsoft.AutoGen.Agents; -public sealed class RegistryGrain : Grain, IRegistryGrain +internal sealed class RegistryGrain : Grain, IRegistryGrain { // TODO: use persistent state for some of these or (better) extend Orleans to implement some of this natively. private readonly Dictionary _workerStates = new(); diff --git a/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs new file mode 100644 index 000000000000..682073f0b97c --- /dev/null +++ b/dotnet/src/Microsoft.AutoGen/Agents/Services/Orleans/SubscriptionsGrain.cs @@ -0,0 +1,53 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// SubscriptionsGrain.cs + +namespace Microsoft.AutoGen.Agents; + +internal sealed class SubscriptionsGrain([PersistentState("state", "PubSubStore")] IPersistentState state) : Grain, ISubscriptionsGrain +{ + private readonly Dictionary> _subscriptions = new(); + public ValueTask>> GetSubscriptions(string? 
agentType = null)
+ {
+ // if agentType is null, return all subscriptions, else filter on agentType
+ if (agentType != null)
+ {
+ return new ValueTask<Dictionary<string, List<string>>>(_subscriptions.Where(x => x.Value.Contains(agentType)).ToDictionary(x => x.Key, x => x.Value));
+ }
+ return new ValueTask<Dictionary<string, List<string>>>(_subscriptions);
+ }
+ public async ValueTask Subscribe(string agentType, string topic)
+ {
+ if (!_subscriptions.TryGetValue(topic, out var subscriptions))
+ {
+ subscriptions = _subscriptions[topic] = [];
+ }
+ if (!subscriptions.Contains(agentType))
+ {
+ subscriptions.Add(agentType);
+ }
+ state.State.Subscriptions = _subscriptions;
+ await state.WriteStateAsync();
+ }
+ public async ValueTask Unsubscribe(string agentType, string topic)
+ {
+ if (_subscriptions.TryGetValue(topic, out var subscriptions))
+ {
+ subscriptions.Remove(agentType);
+ }
+ state.State.Subscriptions = _subscriptions;
+ await state.WriteStateAsync();
+ }
+}
+public sealed class SubscriptionsState
+{
+ public Dictionary<string, List<string>> Subscriptions { get; set; } = new();
+}
diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Extensions.cs b/dotnet/src/Microsoft.AutoGen/Extensions/Aspire/AspireHostingExtensions.cs
similarity index 98%
rename from dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Extensions.cs
rename to dotnet/src/Microsoft.AutoGen/Extensions/Aspire/AspireHostingExtensions.cs
index d69a0c6862b0..b43eb459625e 100644
--- a/dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Extensions.cs
+++ b/dotnet/src/Microsoft.AutoGen/Extensions/Aspire/AspireHostingExtensions.cs
@@ -1,5 +1,5 @@
// Copyright (c) Microsoft Corporation. All rights reserved.
-// Extensions.cs
+// AspireHostingExtensions.cs
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Diagnostics.HealthChecks;
@@ -15,7 +15,7 @@ namespace Microsoft.Extensions.Hosting;
// Adds common .NET Aspire services: service discovery, resilience, health checks, and OpenTelemetry.
// This project should be referenced by each service project in your solution.
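// Usage sketch (hypothetical service wiring; AddServiceDefaults is defined just below):
//   var builder = WebApplication.CreateBuilder(args);
//   builder.AddServiceDefaults(); // service discovery, resilience, health checks, OpenTelemetry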
// To learn more about using this project, see https://aka.ms/dotnet/aspire/service-defaults -public static class Extensions +public static class AspireHostingExtensions { public static IHostApplicationBuilder AddServiceDefaults(this IHostApplicationBuilder builder) { diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Microsoft.AutoGen.ServiceDefaults.csproj b/dotnet/src/Microsoft.AutoGen/Extensions/Aspire/Microsoft.AutoGen.Extensions.Aspire.csproj similarity index 93% rename from dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Microsoft.AutoGen.ServiceDefaults.csproj rename to dotnet/src/Microsoft.AutoGen/Extensions/Aspire/Microsoft.AutoGen.Extensions.Aspire.csproj index cf2446f93349..0cab61bc27b6 100644 --- a/dotnet/src/Microsoft.AutoGen/Extensions/ServiceDefaults/Microsoft.AutoGen.ServiceDefaults.csproj +++ b/dotnet/src/Microsoft.AutoGen/Extensions/Aspire/Microsoft.AutoGen.Extensions.Aspire.csproj @@ -5,6 +5,9 @@ enable true + + + diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.cs b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/MEAIHostingExtensions.cs similarity index 94% rename from dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.cs rename to dotnet/src/Microsoft.AutoGen/Extensions/MEAI/MEAIHostingExtensions.cs index c3c9c197392d..d39f358f8cbe 100644 --- a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.cs +++ b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/MEAIHostingExtensions.cs @@ -1,11 +1,11 @@ // Copyright (c) Microsoft Corporation. All rights reserved. -// AIModelClientHostingExtensions.cs +// MEAIHostingExtensions.cs using Microsoft.Extensions.AI; namespace Microsoft.Extensions.Hosting; -public static class AIModelClient +public static class MEAIHostingExtensions { public static IHostApplicationBuilder AddChatCompletionService(this IHostApplicationBuilder builder, string serviceName) { diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.csproj b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/Microsoft.AutoGen.Extensions.MEAI.csproj similarity index 87% rename from dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.csproj rename to dotnet/src/Microsoft.AutoGen/Extensions/MEAI/Microsoft.AutoGen.Extensions.MEAI.csproj index 2358351deb6c..b8233a8e6c50 100644 --- a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/AIModelClientHostingExtensions.csproj +++ b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/Microsoft.AutoGen.Extensions.MEAI.csproj @@ -4,6 +4,9 @@ enable enable + + + @@ -14,6 +17,6 @@ - + diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/Options/AIClientOptions.cs b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/Options/AIClientOptions.cs similarity index 100% rename from dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/Options/AIClientOptions.cs rename to dotnet/src/Microsoft.AutoGen/Extensions/MEAI/Options/AIClientOptions.cs diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/ServiceCollectionChatCompletionExtensions.cs b/dotnet/src/Microsoft.AutoGen/Extensions/MEAI/ServiceCollectionChatCompletionExtensions.cs similarity index 100% rename from dotnet/src/Microsoft.AutoGen/Extensions/AIModelClientHostingExtensions/ServiceCollectionChatCompletionExtensions.cs 
rename to dotnet/src/Microsoft.AutoGen/Extensions/MEAI/ServiceCollectionChatCompletionExtensions.cs diff --git a/dotnet/src/Microsoft.AutoGen/Extensions/SemanticKernel/Microsoft.AutoGen.Extensions.SemanticKernel.csproj b/dotnet/src/Microsoft.AutoGen/Extensions/SemanticKernel/Microsoft.AutoGen.Extensions.SemanticKernel.csproj index fb47750fd44d..c4ac5536e70c 100644 --- a/dotnet/src/Microsoft.AutoGen/Extensions/SemanticKernel/Microsoft.AutoGen.Extensions.SemanticKernel.csproj +++ b/dotnet/src/Microsoft.AutoGen/Extensions/SemanticKernel/Microsoft.AutoGen.Extensions.SemanticKernel.csproj @@ -1,22 +1,23 @@ - - - - - - net8.0 enable enable - - - + + + + + + + + + + diff --git a/dotnet/test/AutoGen.AotCompatibility.Tests/AutoGen.AotCompatibility.Tests.csproj b/dotnet/test/AutoGen.AotCompatibility.Tests/AutoGen.AotCompatibility.Tests.csproj index aec9660bb922..379bca541012 100644 --- a/dotnet/test/AutoGen.AotCompatibility.Tests/AutoGen.AotCompatibility.Tests.csproj +++ b/dotnet/test/AutoGen.AotCompatibility.Tests/AutoGen.AotCompatibility.Tests.csproj @@ -2,7 +2,7 @@ Exe - net8.0 + net8.0 enable enable true diff --git a/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs b/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs new file mode 100644 index 000000000000..807bf41e9479 --- /dev/null +++ b/dotnet/test/AutoGen.OpenAI.Tests/RolePlayToolCallOrchestratorTests.cs @@ -0,0 +1,269 @@ +// Copyright (c) Microsoft Corporation. All rights reserved. +// RolePlayToolCallOrchestratorTests.cs + +using System; +using System.Collections.Generic; +using System.Threading.Tasks; +using AutoGen.OpenAI.Orchestrator; +using AutoGen.Tests; +using Azure.AI.OpenAI; +using FluentAssertions; +using Moq; +using OpenAI; +using OpenAI.Chat; +using Xunit; + +namespace AutoGen.OpenAI.Tests; + +public class RolePlayToolCallOrchestratorTests +{ + [Fact] + public async Task ItReturnNullWhenNoCandidateIsAvailableAsync() + { + var chatClient = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(chatClient); + var context = new OrchestrationContext + { + Candidates = [], + ChatHistory = [], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().BeNull(); + } + + [Fact] + public async Task ItReturnCandidateWhenOnlyOneCandidateIsAvailableAsync() + { + var chatClient = Mock.Of(); + var alice = new EchoAgent("Alice"); + var orchestrator = new RolePlayToolCallOrchestrator(chatClient); + var context = new OrchestrationContext + { + Candidates = [alice], + ChatHistory = [], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().Be(alice); + } + + [Fact] + public async Task ItSelectNextSpeakerFromWorkflowIfProvided() + { + var workflow = new Graph(); + var alice = new EchoAgent("Alice"); + var bob = new EchoAgent("Bob"); + var charlie = new EchoAgent("Charlie"); + workflow.AddTransition(Transition.Create(alice, bob)); + workflow.AddTransition(Transition.Create(bob, charlie)); + workflow.AddTransition(Transition.Create(charlie, alice)); + + var client = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + var context = new OrchestrationContext + { + Candidates = [alice, bob, charlie], + ChatHistory = + [ + new TextMessage(Role.User, "Hello, Bob", from: "Alice"), + ], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().Be(bob); + } + + [Fact] + public async Task ItReturnNullIfNoAvailableAgentFromWorkflowAsync() + { + var workflow = new Graph(); + var 
alice = new EchoAgent("Alice"); + var bob = new EchoAgent("Bob"); + workflow.AddTransition(Transition.Create(alice, bob)); + + var client = Mock.Of(); + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + var context = new OrchestrationContext + { + Candidates = [alice, bob], + ChatHistory = + [ + new TextMessage(Role.User, "Hello, Alice", from: "Bob"), + ], + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker.Should().BeNull(); + } + + [ApiKeyFact("AZURE_OPENAI_API_KEY", "AZURE_OPENAI_ENDPOINT", "AZURE_OPENAI_DEPLOY_NAME")] + public async Task GPT_3_5_CoderReviewerRunnerTestAsync() + { + var endpoint = Environment.GetEnvironmentVariable("AZURE_OPENAI_ENDPOINT") ?? throw new Exception("Please set AZURE_OPENAI_ENDPOINT environment variable."); + var key = Environment.GetEnvironmentVariable("AZURE_OPENAI_API_KEY") ?? throw new Exception("Please set AZURE_OPENAI_API_KEY environment variable."); + var deployName = Environment.GetEnvironmentVariable("AZURE_OPENAI_DEPLOY_NAME") ?? throw new Exception("Please set AZURE_OPENAI_DEPLOY_NAME environment variable."); + var openaiClient = new AzureOpenAIClient(new Uri(endpoint), new System.ClientModel.ApiKeyCredential(key)); + var chatClient = openaiClient.GetChatClient(deployName); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + [ApiKeyFact("OPENAI_API_KEY")] + public async Task GPT_4o_CoderReviewerRunnerTestAsync() + { + var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OPENAI_API_KEY is not set"); + var model = "gpt-4o"; + var openaiClient = new OpenAIClient(apiKey); + var chatClient = openaiClient.GetChatClient(model); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + [ApiKeyFact("OPENAI_API_KEY")] + public async Task GPT_4o_mini_CoderReviewerRunnerTestAsync() + { + var apiKey = Environment.GetEnvironmentVariable("OPENAI_API_KEY") ?? throw new InvalidOperationException("OPENAI_API_KEY is not set"); + var model = "gpt-4o-mini"; + var openaiClient = new OpenAIClient(apiKey); + var chatClient = openaiClient.GetChatClient(model); + + await BusinessWorkflowTest(chatClient); + await CoderReviewerRunnerTestAsync(chatClient); + } + + /// + /// This test is to mimic the conversation among coder, reviewer and runner. + /// The coder will write the code, the reviewer will review the code, and the runner will run the code. 
+ /// + /// + /// + private async Task CoderReviewerRunnerTestAsync(ChatClient client) + { + var coder = new EchoAgent("Coder"); + var reviewer = new EchoAgent("Reviewer"); + var runner = new EchoAgent("Runner"); + var user = new EchoAgent("User"); + var initializeMessage = new List + { + new TextMessage(Role.User, "Hello, I am user, I will provide the coding task, please write the code first, then review and run it", from: "User"), + new TextMessage(Role.User, "Hello, I am coder, I will write the code", from: "Coder"), + new TextMessage(Role.User, "Hello, I am reviewer, I will review the code", from: "Reviewer"), + new TextMessage(Role.User, "Hello, I am runner, I will run the code", from: "Runner"), + new TextMessage(Role.User, "how to print 'hello world' using C#", from: user.Name), + }; + + var chatHistory = new List() + { + new TextMessage(Role.User, """ + ```csharp + Console.WriteLine("Hello World"); + ``` + """, from: coder.Name), + new TextMessage(Role.User, "The code looks good", from: reviewer.Name), + new TextMessage(Role.User, "The code runs successfully, the output is 'Hello World'", from: runner.Name), + }; + + var orchestrator = new RolePlayToolCallOrchestrator(client); + foreach (var message in chatHistory) + { + var context = new OrchestrationContext + { + Candidates = [coder, reviewer, runner, user], + ChatHistory = initializeMessage, + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker!.Name.Should().Be(message.From); + initializeMessage.Add(message); + } + + // the last next speaker should be the user + var lastSpeaker = await orchestrator.GetNextSpeakerAsync(new OrchestrationContext + { + Candidates = [coder, reviewer, runner, user], + ChatHistory = initializeMessage, + }); + + lastSpeaker!.Name.Should().Be(user.Name); + } + + // test if the tool call orchestrator still run business workflow when the conversation is not in English + private async Task BusinessWorkflowTest(ChatClient client) + { + var ceo = new EchoAgent("乙方首席执行官"); + var pm = new EchoAgent("乙方项目经理"); + var dev = new EchoAgent("乙方开发人员"); + var user = new EchoAgent("甲方"); + var initializeMessage = new List + { + new TextMessage(Role.User, "你好,我是你们的甲方", from: user.Name), + new TextMessage(Role.User, "你好,我是乙方首席执行官,我将负责对接甲方和给项目经理及开发人员分配任务", from: ceo.Name), + new TextMessage(Role.User, "你好,我是乙方项目经理,我将负责项目的进度和质量", from: pm.Name), + new TextMessage(Role.User, "你好,我是乙方开发人员 我将负责项目的具体开发", from: dev.Name), + new TextMessage(Role.User, "开发一个淘宝,预算1W", from: user.Name), + }; + + var workflow = new Graph(); + workflow.AddTransition(Transition.Create(ceo, pm)); + workflow.AddTransition(Transition.Create(ceo, dev)); + workflow.AddTransition(Transition.Create(pm, ceo)); + workflow.AddTransition(Transition.Create(dev, ceo)); + workflow.AddTransition(Transition.Create(user, ceo)); + workflow.AddTransition(Transition.Create(ceo, user)); + + var chatHistory = new List() + { + new TextMessage(Role.User, """ + 项目经理,如何使用1W预算开发一个淘宝 + """, from: ceo.Name), + new TextMessage(Role.User, """ + 对于1万预算开发淘宝类网站,以下是关键建议: + 技术选择: + - 使用开源电商系统节省成本, 选择便宜但稳定的云服务器和域名,预算2000元/年 + - 核心功能优先 + - 人员安排: + - 找1位全栈开发,负责系统搭建(6000元) + - 1位兼职UI设计(2000元) + - 进度规划: + - 基础功能1个月完成,后续根据运营情况逐步优化。 + """, from: pm.Name), + new TextMessage(Role.User, "好的,开发人员,请根据项目经理的规划开始开发", from: ceo.Name), + new TextMessage(Role.User, """ + 好的,已开发完毕 + ```html + + ``` + """, from: dev.Name), + new TextMessage(Role.User, "好的,项目已完成,甲方请付款", from: ceo.Name), + }; + + var orchestrator = new RolePlayToolCallOrchestrator(client, workflow); + + foreach 
(var message in chatHistory) + { + var context = new OrchestrationContext + { + Candidates = [ceo, pm, dev, user], + ChatHistory = initializeMessage, + }; + + var speaker = await orchestrator.GetNextSpeakerAsync(context); + speaker!.Name.Should().Be(message.From); + initializeMessage.Add(message); + } + + // the last next speaker should be the user + var lastSpeaker = await orchestrator.GetNextSpeakerAsync(new OrchestrationContext + { + Candidates = [ceo, pm, dev, user], + ChatHistory = initializeMessage, + }); + + lastSpeaker!.Name.Should().Be(user.Name); + } +} diff --git a/dotnet/test/AutoGen.Tests/AutoGen.Tests.csproj b/dotnet/test/AutoGen.Tests/AutoGen.Tests.csproj index 248a9e29b00d..367d74619bb4 100644 --- a/dotnet/test/AutoGen.Tests/AutoGen.Tests.csproj +++ b/dotnet/test/AutoGen.Tests/AutoGen.Tests.csproj @@ -14,7 +14,7 @@ - + diff --git a/dotnet/test/AutoGen.Tests/ImageMessageTests.cs b/dotnet/test/AutoGen.Tests/ImageMessageTests.cs index e8a30c87012c..bb256a170f2a 100644 --- a/dotnet/test/AutoGen.Tests/ImageMessageTests.cs +++ b/dotnet/test/AutoGen.Tests/ImageMessageTests.cs @@ -35,4 +35,18 @@ public async Task ItCreateFromUrl() imageMessage.MimeType.Should().Be("image/png"); imageMessage.Data.Should().BeNull(); } + + [Fact] + public async Task ItCreateFromBase64Url() + { + var image = Path.Combine("testData", "images", "background.png"); + var binary = File.ReadAllBytes(image); + var base64 = Convert.ToBase64String(binary); + + var base64Url = $"data:image/png;base64,{base64}"; + var imageMessage = new ImageMessage(Role.User, base64Url); + + imageMessage.BuildDataUri().Should().Be(base64Url); + imageMessage.MimeType.Should().Be("image/png"); + } } diff --git a/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs b/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs index 0def7029f001..7b5182e614b7 100644 --- a/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs +++ b/dotnet/test/Microsoft.AutoGen.Agents.Tests/AgentBaseTests.cs @@ -1,21 +1,33 @@ // Copyright (c) Microsoft Corporation. All rights reserved. 
// AgentBaseTests.cs +using System.Collections.Concurrent; using FluentAssertions; using Google.Protobuf.Reflection; +using Microsoft.AspNetCore.Builder; using Microsoft.AutoGen.Abstractions; +using Microsoft.Extensions.DependencyInjection; +using Microsoft.Extensions.Hosting; using Microsoft.Extensions.Logging; using Moq; using Xunit; +using static Microsoft.AutoGen.Agents.Tests.AgentBaseTests; namespace Microsoft.AutoGen.Agents.Tests; -public class AgentBaseTests +[Collection(ClusterFixtureCollection.Name)] +public class AgentBaseTests(InMemoryAgentRuntimeFixture fixture) { + private readonly InMemoryAgentRuntimeFixture _fixture = fixture; + [Fact] public async Task ItInvokeRightHandlerTestAsync() { var mockContext = new Mock<IAgentRuntime>(); + mockContext.SetupGet(x => x.AgentId).Returns(new AgentId("test", "test")); + // mock SendMessageAsync + mockContext.Setup(x => x.SendMessageAsync(It.IsAny<Message>(), It.IsAny<CancellationToken>())) + .Returns(new ValueTask()); var agent = new TestAgent(mockContext.Object, new EventTypes(TypeRegistry.Empty, [], [], []), new Logger<AgentBase>(new LoggerFactory())); await agent.HandleObject("hello world"); @@ -26,12 +38,36 @@ public async Task ItInvokeRightHandlerTestAsync() agent.ReceivedItems[1].Should().Be(42); } + [Fact] + public async Task ItDelegateMessageToTestAgentAsync() + { + var client = _fixture.AppHost.Services.GetRequiredService(); + + await client.PublishMessageAsync(new TextMessage() + { + Source = nameof(ItDelegateMessageToTestAgentAsync), + TextMessage_ = "buffer" + }, token: CancellationToken.None); + + // wait for 10 seconds + var cts = new CancellationTokenSource(TimeSpan.FromSeconds(10)); + while (!TestAgent.ReceivedMessages.ContainsKey(nameof(ItDelegateMessageToTestAgentAsync)) && !cts.Token.IsCancellationRequested) + { + await Task.Delay(100); + } + + TestAgent.ReceivedMessages[nameof(ItDelegateMessageToTestAgentAsync)].Should().NotBeNull(); + } + /// <summary> /// The test agent is a simple agent that is used for testing purposes. /// </summary> - public class TestAgent : AgentBase, IHandle<string>, IHandle<int> + public class TestAgent : AgentBase, IHandle<string>, IHandle<int>, IHandle<TextMessage> { - public TestAgent(IAgentRuntime context, EventTypes eventTypes, Logger<AgentBase> logger) : base(context, eventTypes, logger) + public TestAgent( + IAgentRuntime context, + [FromKeyedServices("EventTypes")] EventTypes eventTypes, + Logger<AgentBase>? 
logger = null) : base(context, eventTypes, logger) { } @@ -47,6 +83,49 @@ public Task Handle(int item) return Task.CompletedTask; } + public Task Handle(TextMessage item) + { + ReceivedMessages[item.Source] = item.TextMessage_; + return Task.CompletedTask; + } + public List<object> ReceivedItems { get; private set; } = []; + + /// <summary> + /// Key: source + /// Value: message + /// </summary> + public static ConcurrentDictionary<string, object> ReceivedMessages { get; private set; } = new(); + } +} + +public sealed class InMemoryAgentRuntimeFixture : IDisposable +{ + public InMemoryAgentRuntimeFixture() + { + var builder = WebApplication.CreateBuilder(); + + // step 1: create in-memory agent runtime + // step 2: register TestAgent to that agent runtime + builder + .AddAgentService(local: true, useGrpc: false) + .AddAgentWorker(local: true) + .AddAgent<TestAgent>(nameof(TestAgent)); + + AppHost = builder.Build(); + AppHost.StartAsync().Wait(); + } + public IHost AppHost { get; } + + void IDisposable.Dispose() + { + AppHost.StopAsync().Wait(); + AppHost.Dispose(); } } + +[CollectionDefinition(Name)] +public sealed class ClusterFixtureCollection : ICollectionFixture<InMemoryAgentRuntimeFixture> +{ + public const string Name = nameof(ClusterFixtureCollection); +} diff --git a/dotnet/test/Microsoft.AutoGen.Agents.Tests/Microsoft.AutoGen.Agents.Tests.csproj b/dotnet/test/Microsoft.AutoGen.Agents.Tests/Microsoft.AutoGen.Agents.Tests.csproj index db7467bf1235..4abf1dc834d6 100644 --- a/dotnet/test/Microsoft.AutoGen.Agents.Tests/Microsoft.AutoGen.Agents.Tests.csproj +++ b/dotnet/test/Microsoft.AutoGen.Agents.Tests/Microsoft.AutoGen.Agents.Tests.csproj @@ -9,6 +9,7 @@ + diff --git a/dotnet/website/articles/Installation.md b/dotnet/website/articles/Installation.md index 30b55442d246..b421304b04be 100644 --- a/dotnet/website/articles/Installation.md +++ b/dotnet/website/articles/Installation.md @@ -32,9 +32,7 @@ dotnet add package AUTOGEN_PACKAGES ### Consume nightly build To consume nightly build, you can add one of the following feeds to your `NuGet.config` or global nuget config: -- ![Static Badge](https://img.shields.io/badge/public-blue?style=flat) ![Static Badge](https://img.shields.io/badge/github-grey?style=flat): https://nuget.pkg.github.com/microsoft/index.json -- ![Static Badge](https://img.shields.io/badge/public-blue?style=flat) ![Static Badge](https://img.shields.io/badge/myget-grey?style=flat): https://www.myget.org/F/agentchat/api/v3/index.json -- ![Static Badge](https://img.shields.io/badge/internal-blue?style=flat) ![Static Badge](https://img.shields.io/badge/azure_devops-grey?style=flat) : https://devdiv.pkgs.visualstudio.com/DevDiv/_packaging/AutoGen/nuget/v3/index.json +> - [![Static Badge](https://img.shields.io/badge/azure_devops-grey?style=flat)](https://dev.azure.com/AGPublish/AGPublic/_artifacts/feed/AutoGen-Nightly) : To add a local `NuGet.config`, create a file named `NuGet.config` in the root of your project and add the following content: ```xml @@ -42,8 +40,6 @@ To add a local `NuGet.config`, create a file named `NuGet.config` in the root of - - diff --git a/dotnet/website/articles/getting-start.md b/dotnet/website/articles/getting-start.md index fe10a597aacd..0d4bf3316364 100644 --- a/dotnet/website/articles/getting-start.md +++ b/dotnet/website/articles/getting-start.md @@ -1,6 +1,5 @@ ### Get start with AutoGen for dotnet [![dotnet-ci](https://github.com/microsoft/autogen/actions/workflows/dotnet-build.yml/badge.svg)](https://github.com/microsoft/autogen/actions/workflows/dotnet-build.yml)
-[![Discord](https://img.shields.io/discord/1153072414184452236?logo=discord&style=flat)](https://discord.gg/pAbnFJrkgZ) [![NuGet version](https://badge.fury.io/nu/AutoGen.Core.svg)](https://badge.fury.io/nu/AutoGen.Core) Firstly, add `AutoGen` package to your project. diff --git a/dotnet/website/release_note/toc.yml b/dotnet/website/release_note/toc.yml index 6f070c70b861..5a423078ac64 100644 --- a/dotnet/website/release_note/toc.yml +++ b/dotnet/website/release_note/toc.yml @@ -2,7 +2,7 @@ href: 0.2.2.md - name: 0.2.1 -href: 0.2.1.md + href: 0.2.1.md - name: 0.2.0 href: 0.2.0.md @@ -17,4 +17,4 @@ href: 0.2.1.md href: 0.0.16.md - name: 0.0.0 - 0.0.15 - href: update.md \ No newline at end of file + href: update.md diff --git a/protos/agent_events.proto b/protos/agent_events.proto index 811c888c6429..5fd88bf8c441 100644 --- a/protos/agent_events.proto +++ b/protos/agent_events.proto @@ -3,7 +3,10 @@ syntax = "proto3"; package agents; option csharp_namespace = "Microsoft.AutoGen.Abstractions"; - +message TextMessage { + string textMessage = 1; + string source = 2; +} message Input { string message = 1; } diff --git a/protos/agent_worker.proto b/protos/agent_worker.proto index 7b0b5245dd3e..61b00333cd24 100644 --- a/protos/agent_worker.proto +++ b/protos/agent_worker.proto @@ -63,9 +63,15 @@ message TypeSubscription { string agent_type = 2; } +message TypePrefixSubscription { + string topic_type_prefix = 1; + string agent_type = 2; +} + message Subscription { oneof subscription { TypeSubscription typeSubscription = 1; + TypePrefixSubscription typePrefixSubscription = 2; } } diff --git a/python/.gitignore b/python/.gitignore index 186e847cc125..677a888f2f49 100644 --- a/python/.gitignore +++ b/python/.gitignore @@ -172,3 +172,6 @@ docs/**/jupyter_execute # Temporary files tmp_code_*.py + +# .NET Development settings +appsettings.Development.json \ No newline at end of file diff --git a/python/README.md b/python/README.md index 5b012ff60bf9..69085b4c8dd2 100644 --- a/python/README.md +++ b/python/README.md @@ -1,8 +1,11 @@ # AutoGen Python packages -See [`autogen-core`](./packages/autogen-core/) package for main functionality. +[![0.4 Docs](https://img.shields.io/badge/Docs-0.4-blue)](https://microsoft.github.io/autogen/dev/) +[![PyPi autogen-core](https://img.shields.io/badge/PyPi-autogen--core-blue?logo=pypi)](https://pypi.org/project/autogen-core/0.4.0.dev7/) [![PyPi autogen-agentchat](https://img.shields.io/badge/PyPi-autogen--agentchat-blue?logo=pypi)](https://pypi.org/project/autogen-agentchat/0.4.0.dev7/) [![PyPi autogen-ext](https://img.shields.io/badge/PyPi-autogen--ext-blue?logo=pypi)](https://pypi.org/project/autogen-ext/0.4.0.dev7/) +This directory works as a single `uv` workspace containing all project packages. See [`packages`](./packages/) to discover all project packages. + ## Development **TL;DR**, run all checks with: diff --git a/python/packages/agbench/README.md b/python/packages/agbench/README.md index e0b9c1c84694..a8209a1e9d25 100644 --- a/python/packages/agbench/README.md +++ b/python/packages/agbench/README.md @@ -10,7 +10,7 @@ If you are already an AutoGenBench pro, and want the full technical specificatio ## Docker Requirement -AutoGenBench also requires Docker (Desktop or Engine). **It will not run in GitHub codespaces**, unless you opt for native execution (with is strongly discouraged). To install Docker Desktop see [https://www.docker.com/products/docker-desktop/](https://www.docker.com/products/docker-desktop/). 
+AutoGenBench also requires Docker (Desktop or Engine). **It will not run in GitHub codespaces**, unless you opt for native execution (which is strongly discouraged). To install Docker Desktop see [https://www.docker.com/products/docker-desktop/](https://www.docker.com/products/docker-desktop/). If you are working in WSL, you can follow the instructions below to set up your environment: diff --git a/python/packages/autogen-agentchat/pyproject.toml b/python/packages/autogen-agentchat/pyproject.toml index ab5b83e5ee4d..c2336a6eeba8 100644 --- a/python/packages/autogen-agentchat/pyproject.toml +++ b/python/packages/autogen-agentchat/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-agentchat" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "AutoGen agents and teams library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev6", + "autogen-core==0.4.0.dev7", ] [tool.uv] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py index cd435bf0228a..4cff9f45822b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/__init__.py @@ -4,6 +4,7 @@ from ._coding_assistant_agent import CodingAssistantAgent from ._society_of_mind_agent import SocietyOfMindAgent from ._tool_use_assistant_agent import ToolUseAssistantAgent +from ._user_proxy_agent import UserProxyAgent __all__ = [ "BaseChatAgent", @@ -13,4 +14,5 @@ "CodingAssistantAgent", "ToolUseAssistantAgent", "SocietyOfMindAgent", + "UserProxyAgent", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py index 989c6f62c760..0870a6c2f3b0 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_assistant_agent.py @@ -82,13 +82,13 @@ def _handoff_tool() -> str: class AssistantAgent(BaseChatAgent): """An agent that provides assistance with tool use. - It responds with a StopMessage when 'terminate' is detected in the response. - Args: name (str): The name of the agent. model_client (ChatCompletionClient): The model client to use for inference. tools (List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None, optional): The tools to register with the agent. - handoffs (List[Handoff | str] | None, optional): The handoff configurations for the agent, allowing it to transfer to other agents by responding with a HandoffMessage. + handoffs (List[Handoff | str] | None, optional): The handoff configurations for the agent, + allowing it to transfer to other agents by responding with a :class:`HandoffMessage`. + The transfer is only executed when the agent is used in a :class:`~autogen_agentchat.teams.Swarm` team. If a handoff is a string, it should represent the target agent's name. description (str, optional): The description of the agent. system_message (str, optional): The system message for the model. @@ -126,7 +126,8 @@ async def main() -> None: The following example demonstrates how to create an assistant agent with - a model client and a tool, and generate a stream of messages for a task. 
+ a model client and a tool, generate a stream of messages for a task, and + print the messages to the console. .. code-block:: python @@ -134,6 +135,7 @@ async def main() -> None: from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.messages import TextMessage + from autogen_agentchat.task import Console from autogen_core.base import CancellationToken @@ -145,13 +147,12 @@ async def main() -> None: model_client = OpenAIChatCompletionClient(model="gpt-4o") agent = AssistantAgent(name="assistant", model_client=model_client, tools=[get_current_time]) - stream = agent.on_messages_stream( - [TextMessage(content="What is the current time?", source="user")], CancellationToken() + await Console( + agent.on_messages_stream( + [TextMessage(content="What is the current time?", source="user")], CancellationToken() + ) ) - async for message in stream: - print(message) - asyncio.run(main()) @@ -165,7 +166,7 @@ def __init__( tools: List[Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]] | None = None, handoffs: List[Handoff | str] | None = None, description: str = "An agent that provides assistance with ability to use tools.", - system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with 'TERMINATE' when the task has been completed.", + system_message: str = "You are a helpful AI assistant. Solve tasks using your tools. Reply with TERMINATE when the task has been completed.", ): super().__init__(name=name, description=description) self._model_client = model_client @@ -278,8 +279,9 @@ async def on_messages_stream( return # Generate an inference result based on the current model context. + llm_messages = self._system_messages + self._model_context result = await self._model_client.create( - self._model_context, tools=self._tools + self._handoff_tools, cancellation_token=cancellation_token + llm_messages, tools=self._tools + self._handoff_tools, cancellation_token=cancellation_token ) self._model_context.append(AssistantMessage(content=result.content, source=self.name)) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py index 5fd4c1854cfd..622a4d491c97 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_base_chat_agent.py @@ -4,7 +4,7 @@ from autogen_core.base import CancellationToken from ..base import ChatAgent, Response, TaskResult -from ..messages import AgentMessage, ChatMessage, MultiModalMessage, TextMessage +from ..messages import AgentMessage, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage class BaseChatAgent(ChatAgent, ABC): @@ -54,7 +54,7 @@ async def on_messages_stream( async def run( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the agent with the given task and return the result.""" @@ -62,13 +62,17 @@ async def run( cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] output_messages: List[AgentMessage] = [] - if isinstance(task, str): + if task is None: + pass + elif isinstance(task, str): text_msg = TextMessage(content=task, source="user") input_messages.append(text_msg) output_messages.append(text_msg) - elif isinstance(task, 
TextMessage | MultiModalMessage): + elif isinstance(task, TextMessage | MultiModalMessage | StopMessage | HandoffMessage): input_messages.append(task) output_messages.append(task) + else: + raise ValueError(f"Invalid task type: {type(task)}") response = await self.on_messages(input_messages, cancellation_token) if response.inner_messages is not None: output_messages += response.inner_messages @@ -78,7 +82,7 @@ async def run( async def run_stream( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> AsyncGenerator[AgentMessage | TaskResult, None]: """Run the agent with the given task and return a stream of messages @@ -87,15 +91,19 @@ async def run_stream( cancellation_token = CancellationToken() input_messages: List[ChatMessage] = [] output_messages: List[AgentMessage] = [] - if isinstance(task, str): + if task is None: + pass + elif isinstance(task, str): text_msg = TextMessage(content=task, source="user") input_messages.append(text_msg) output_messages.append(text_msg) yield text_msg - elif isinstance(task, TextMessage | MultiModalMessage): + elif isinstance(task, TextMessage | MultiModalMessage | StopMessage | HandoffMessage): input_messages.append(task) output_messages.append(task) yield task + else: + raise ValueError(f"Invalid task type: {type(task)}") async for message in self.on_messages_stream(input_messages, cancellation_token): if isinstance(message, Response): yield message.chat_message diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py index 7f5447fb5cc1..5dd4c9008ef5 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_code_executor_agent.py @@ -9,7 +9,53 @@ class CodeExecutorAgent(BaseChatAgent): - """An agent that executes code snippets and report the results.""" + """An agent that extracts and executes code snippets found in received messages and returns the output. + + It is typically used within a team with another agent that generates code snippets to be executed. + + .. note:: + + It is recommended that the `CodeExecutorAgent` agent uses a Docker container to execute code. This ensures that model-generated code is executed in an isolated environment. To use Docker, your environment must have Docker installed and running. + Follow the installation instructions for `Docker `_. + + In this example, we show how to set up a `CodeExecutorAgent` agent that uses the + :py:class:`~autogen_ext.code_executors.DockerCommandLineCodeExecutor` + to execute code snippets in a Docker container. The `work_dir` parameter indicates where all executed files are first saved locally before being executed in the Docker container. + + .. code-block:: python + + import asyncio + from autogen_agentchat.agents import CodeExecutorAgent + from autogen_agentchat.messages import TextMessage + from autogen_ext.code_executors import DockerCommandLineCodeExecutor + from autogen_core.base import CancellationToken + + + async def run_code_executor_agent() -> None: + # Create a code executor agent that uses a Docker container to execute code. 
+ code_executor = DockerCommandLineCodeExecutor(work_dir="coding") + await code_executor.start() + code_executor_agent = CodeExecutorAgent("code_executor", code_executor=code_executor) + + # Run the agent with a given code snippet. + task = TextMessage( + content='''Here is some code + ```python + print('Hello world') + ``` + ''', + source="user", + ) + response = await code_executor_agent.on_messages([task], CancellationToken()) + print(response.chat_message) + + # Stop the code executor. + await code_executor.stop() + + + asyncio.run(run_code_executor_agent()) + + """ def __init__( self, diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py new file mode 100644 index 000000000000..bdaca53ddc6c --- /dev/null +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/agents/_user_proxy_agent.py @@ -0,0 +1,89 @@ +import asyncio +from inspect import iscoroutinefunction +from typing import Awaitable, Callable, List, Optional, Sequence, Union, cast + +from autogen_core.base import CancellationToken + +from ..base import Response +from ..messages import ChatMessage, HandoffMessage, TextMessage +from ._base_chat_agent import BaseChatAgent + +# Define input function types more precisely +SyncInputFunc = Callable[[str], str] +AsyncInputFunc = Callable[[str, Optional[CancellationToken]], Awaitable[str]] +InputFuncType = Union[SyncInputFunc, AsyncInputFunc] + + +class UserProxyAgent(BaseChatAgent): + """An agent that can represent a human user in a chat.""" + + def __init__( + self, + name: str, + description: str = "a human user", + input_func: Optional[InputFuncType] = None, + ) -> None: + """Initialize the UserProxyAgent.""" + super().__init__(name=name, description=description) + self.input_func = input_func or input + self._is_async = iscoroutinefunction(self.input_func) + + @property + def produced_message_types(self) -> List[type[ChatMessage]]: + """Message types this agent can produce.""" + return [TextMessage, HandoffMessage] + + def _get_latest_handoff(self, messages: Sequence[ChatMessage]) -> Optional[HandoffMessage]: + """Find the most recent HandoffMessage in the message sequence.""" + for message in reversed(messages): + if isinstance(message, HandoffMessage): + return message + return None + + async def _get_input(self, prompt: str, cancellation_token: Optional[CancellationToken]) -> str: + """Handle input based on function signature.""" + try: + if self._is_async: + # Cast to AsyncInputFunc for proper typing + async_func = cast(AsyncInputFunc, self.input_func) + return await async_func(prompt, cancellation_token) + else: + # Cast to SyncInputFunc for proper typing + sync_func = cast(SyncInputFunc, self.input_func) + loop = asyncio.get_event_loop() + return await loop.run_in_executor(None, sync_func, prompt) + + except asyncio.CancelledError: + raise + except Exception as e: + raise RuntimeError(f"Failed to get user input: {str(e)}") from e + + async def on_messages( + self, messages: Sequence[ChatMessage], cancellation_token: Optional[CancellationToken] = None + ) -> Response: + """Handle incoming messages by requesting user input.""" + try: + # Check for handoff first + handoff = self._get_latest_handoff(messages) + prompt = ( + f"Handoff received from {handoff.source}. 
Enter your response: " if handoff else "Enter your response: " + ) + + user_input = await self._get_input(prompt, cancellation_token) + + # Return appropriate message type based on handoff presence + if handoff: + return Response( + chat_message=HandoffMessage(content=user_input, target=handoff.source, source=self.name) + ) + else: + return Response(chat_message=TextMessage(content=user_input, source=self.name)) + + except asyncio.CancelledError: + raise + except Exception as e: + raise RuntimeError(f"Failed to get user input: {str(e)}") from e + + async def on_reset(self, cancellation_token: Optional[CancellationToken] = None) -> None: + """Reset agent state.""" + pass diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py index 0a5e37dce26b..d2cb39eb6fd2 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_task.py @@ -3,7 +3,7 @@ from autogen_core.base import CancellationToken -from ..messages import AgentMessage, MultiModalMessage, TextMessage +from ..messages import AgentMessage, ChatMessage @dataclass @@ -23,7 +23,7 @@ class TaskRunner(Protocol): async def run( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the task and return the result. @@ -36,7 +36,7 @@ async def run( def run_stream( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> AsyncGenerator[AgentMessage | TaskResult, None]: """Run the task and produces a stream of messages and the final result diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py index 859740fa093e..c923e8ced51c 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/base/_termination.py @@ -23,15 +23,15 @@ class TerminationCondition(ABC): .. code-block:: python import asyncio - from autogen_agentchat.teams import MaxTurnsTermination, TextMentionTermination + from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination async def main() -> None: # Terminate the conversation after 10 turns or if the text "TERMINATE" is mentioned. - cond1 = MaxTurnsTermination(10) | TextMentionTermination("TERMINATE") + cond1 = MaxMessageTermination(10) | TextMentionTermination("TERMINATE") # Terminate the conversation after 10 turns and if the text "TERMINATE" is mentioned. - cond2 = MaxTurnsTermination(10) & TextMentionTermination("TERMINATE") + cond2 = MaxMessageTermination(10) & TextMentionTermination("TERMINATE") # ... 
diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py index 2a57b0e9cb62..d863c87cd13a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/__init__.py @@ -1,7 +1,9 @@ from ._console import Console from ._terminations import ( + ExternalTermination, HandoffTermination, MaxMessageTermination, + SourceMatchTermination, StopMessageTermination, TextMentionTermination, TimeoutTermination, @@ -15,5 +17,7 @@ "TokenUsageTermination", "HandoffTermination", "TimeoutTermination", + "ExternalTermination", + "SourceMatchTermination", "Console", ] diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py index 7e4d1cab9879..9899366cdc07 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_console.py @@ -1,19 +1,51 @@ +import os import sys import time -from typing import AsyncGenerator, List +from typing import AsyncGenerator, List, Optional, TypeVar, cast +from autogen_core.components import Image from autogen_core.components.models import RequestUsage -from autogen_agentchat.base import TaskResult +from autogen_agentchat.base import Response, TaskResult from autogen_agentchat.messages import AgentMessage, MultiModalMessage -async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> None: - """Consume the stream from :meth:`~autogen_agentchat.teams.Team.run_stream` - and print the messages to the console.""" +def _is_running_in_iterm() -> bool: + return os.getenv("TERM_PROGRAM") == "iTerm.app" +
+def _is_output_a_tty() -> bool: + return sys.stdout.isatty() + + +T = TypeVar("T", bound=TaskResult | Response) + + +async def Console( + stream: AsyncGenerator[AgentMessage | T, None], + *, + no_inline_images: bool = False, +) -> T: + """ + Consumes the message stream from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` + or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream` and renders the messages to the console. + Returns the last processed TaskResult or Response. + + Args: + stream (AsyncGenerator[AgentMessage | TaskResult, None] | AsyncGenerator[AgentMessage | Response, None]): Message stream to render. + This can be from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` or :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. + no_inline_images (bool, optional): If the terminal is iTerm2, images will be rendered inline. Set this to True to disable that behavior. Defaults to False. + + Returns: + last_processed: A :class:`~autogen_agentchat.base.TaskResult` if the stream is from :meth:`~autogen_agentchat.base.TaskRunner.run_stream` + or a :class:`~autogen_agentchat.base.Response` if the stream is from :meth:`~autogen_agentchat.base.ChatAgent.on_messages_stream`. 
+ """ + render_image_iterm = _is_running_in_iterm() and _is_output_a_tty() and not no_inline_images start_time = time.time() total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0) + + last_processed: Optional[T] = None + async for message in stream: if isinstance(message, TaskResult): duration = time.time() - start_time @@ -26,23 +58,69 @@ async def Console(stream: AsyncGenerator[AgentMessage | TaskResult, None]) -> No f"Duration: {duration:.2f} seconds\n" ) sys.stdout.write(output) + # mypy ignore + last_processed = message # type: ignore + + elif isinstance(message, Response): + duration = time.time() - start_time + + # Print final response. + output = f"{'-' * 10} {message.chat_message.source} {'-' * 10}\n{_message_to_str(message.chat_message, render_image_iterm=render_image_iterm)}\n" + if message.chat_message.models_usage: + output += f"[Prompt tokens: {message.chat_message.models_usage.prompt_tokens}, Completion tokens: {message.chat_message.models_usage.completion_tokens}]\n" + total_usage.completion_tokens += message.chat_message.models_usage.completion_tokens + total_usage.prompt_tokens += message.chat_message.models_usage.prompt_tokens + sys.stdout.write(output) + + # Print summary. + if message.inner_messages is not None: + num_inner_messages = len(message.inner_messages) + else: + num_inner_messages = 0 + output = ( + f"{'-' * 10} Summary {'-' * 10}\n" + f"Number of inner messages: {num_inner_messages}\n" + f"Total prompt tokens: {total_usage.prompt_tokens}\n" + f"Total completion tokens: {total_usage.completion_tokens}\n" + f"Duration: {duration:.2f} seconds\n" + ) + sys.stdout.write(output) + # mypy ignore + last_processed = message # type: ignore + else: - output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message)}\n" + # Cast required for mypy to be happy + message = cast(AgentMessage, message) # type: ignore + output = f"{'-' * 10} {message.source} {'-' * 10}\n{_message_to_str(message, render_image_iterm=render_image_iterm)}\n" if message.models_usage: + output += f"[Prompt tokens: {message.models_usage.prompt_tokens}, Completion tokens: {message.models_usage.completion_tokens}]\n" total_usage.completion_tokens += message.models_usage.completion_tokens total_usage.prompt_tokens += message.models_usage.prompt_tokens sys.stdout.write(output) + if last_processed is None: + raise ValueError("No TaskResult or Response was processed.") + + return last_processed + + +# iTerm2 image rendering protocol: https://iterm2.com/documentation-images.html +def _image_to_iterm(image: Image) -> str: + image_data = image.to_base64() + return f"\033]1337;File=inline=1:{image_data}\a\n" + -def _message_to_str(message: AgentMessage) -> str: +def _message_to_str(message: AgentMessage, *, render_image_iterm: bool = False) -> str: if isinstance(message, MultiModalMessage): result: List[str] = [] for c in message.content: if isinstance(c, str): result.append(c) else: - result.append("<image>") + if render_image_iterm: + result.append(_image_to_iterm(c)) + else: + result.append("<image>") return "\n".join(result) else: return f"{message.content}" diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py index 16d44b9840d3..9db5b584a31d 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/task/_terminations.py @@ -1,5 +1,5 @@ import time -from typing import Sequence +from typing 
import List, Sequence from ..base import TerminatedException, TerminationCondition from ..messages import AgentMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage @@ -208,3 +208,81 @@ async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None async def reset(self) -> None: self._start_time = time.monotonic() self._terminated = False + + +class ExternalTermination(TerminationCondition): + """A termination condition that is externally controlled + by calling the :meth:`set` method. + + Example: + + .. code-block:: python + + from autogen_agentchat.task import ExternalTermination + + termination = ExternalTermination() + + # Run the team in an asyncio task. + ... + + # Set the termination condition externally + termination.set() + + """ + + def __init__(self) -> None: + self._terminated = False + self._setted = False + + @property + def terminated(self) -> bool: + return self._terminated + + def set(self) -> None: + """Set the termination condition to terminated.""" + self._setted = True + + async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + if self._terminated: + raise TerminatedException("Termination condition has already been reached") + if self._setted: + self._terminated = True + return StopMessage(content="External termination requested", source="ExternalTermination") + return None + + async def reset(self) -> None: + self._terminated = False + self._setted = False + + +class SourceMatchTermination(TerminationCondition): + """Terminate the conversation after a specific source responds. + + Args: + sources (List[str]): List of source names to terminate the conversation. + + Raises: + TerminatedException: If the termination condition has already been reached. + """ + + def __init__(self, sources: List[str]) -> None: + self._sources = sources + self._terminated = False + + @property + def terminated(self) -> bool: + return self._terminated + + async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None: + if self._terminated: + raise TerminatedException("Termination condition has already been reached") + if not messages: + return None + for message in messages: + if message.source in self._sources: + self._terminated = True + return StopMessage(content=f"'{message.source}' answered", source="SourceMatchTermination") + return None + + async def reset(self) -> None: + self._terminated = False diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py index c41ebaf461bf..fbca2644920a 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat.py @@ -12,13 +12,12 @@ AgentType, CancellationToken, MessageContext, - TopicId, ) from autogen_core.components import ClosureAgent, TypeSubscription from ... 
import EVENT_LOGGER_NAME from ...base import ChatAgent, TaskResult, Team, TerminationCondition -from ...messages import AgentMessage, MultiModalMessage, TextMessage +from ...messages import AgentMessage, ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, TextMessage from ._chat_agent_container import ChatAgentContainer from ._events import GroupChatMessage, GroupChatReset, GroupChatStart, GroupChatTermination from ._sequential_routed_agent import SequentialRoutedAgent @@ -164,7 +163,7 @@ async def collect_output_messages( async def run( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> TaskResult: """Run the team and return the result. The base implementation uses @@ -215,7 +214,7 @@ async def main() -> None: async def run_stream( self, *, - task: str | TextMessage | MultiModalMessage | None = None, + task: str | ChatMessage | None = None, cancellation_token: CancellationToken | None = None, ) -> AsyncGenerator[AgentMessage | TaskResult, None]: """Run the team and produces a stream of messages and the final result @@ -253,6 +252,16 @@ async def main() -> None: asyncio.run(main()) """ + # Create the first chat message if the task is a string or a chat message. + first_chat_message: ChatMessage | None = None + if task is None: + pass + elif isinstance(task, str): + first_chat_message = TextMessage(content=task, source="user") + elif isinstance(task, TextMessage | MultiModalMessage | StopMessage | HandoffMessage): + first_chat_message = task + else: + raise ValueError(f"Invalid task type: {type(task)}") if self._is_running: raise ValueError("The team is already running, it cannot run again until it is stopped.") @@ -265,17 +274,6 @@ async def main() -> None: if not self._initialized: await self._init(self._runtime) - # Run the team by publishing the start message. - first_chat_message: TextMessage | MultiModalMessage | None = None - if isinstance(task, str): - first_chat_message = TextMessage(content=task, source="user") - elif isinstance(task, TextMessage | MultiModalMessage): - first_chat_message = task - await self._runtime.publish_message( - GroupChatStart(message=first_chat_message), - topic_id=TopicId(type=self._group_topic_type, source=self._team_id), - ) - # Start a coroutine to stop the runtime and signal the output message queue is complete. async def stop_runtime() -> None: await self._runtime.stop_when_idle() @@ -283,24 +281,37 @@ async def stop_runtime() -> None: shutdown_task = asyncio.create_task(stop_runtime()) - # Collect the output messages in order. - output_messages: List[AgentMessage] = [] - # Yield the messsages until the queue is empty. - while True: - message = await self._output_message_queue.get() - if message is None: - break - yield message - output_messages.append(message) - - # Wait for the shutdown task to finish. - await shutdown_task - - # Yield the final result. - yield TaskResult(messages=output_messages, stop_reason=self._stop_reason) - - # Indicate that the team is no longer running. - self._is_running = False + try: + # Run the team by sending the start message to the group chat manager. + # The group chat manager will start the group chat by relaying the message to the participants + # and the closure agent. + await self._runtime.send_message( + GroupChatStart(message=first_chat_message), + recipient=AgentId(type=self._group_chat_manager_topic_type, key=self._team_id), + ) + # Collect the output messages in order. 
+ output_messages: List[AgentMessage] = [] + # Yield the messages until the queue is empty. + while True: + message = await self._output_message_queue.get() + if message is None: + break + yield message + output_messages.append(message) + + # Yield the final result. + yield TaskResult(messages=output_messages, stop_reason=self._stop_reason) + + finally: + # Wait for the shutdown task to finish. + await shutdown_task + + # Clear the output message queue. + while not self._output_message_queue.empty(): + self._output_message_queue.get_nowait() + + # Indicate that the team is no longer running. + self._is_running = False async def reset(self) -> None: """Reset the team and its participants to their initial state. @@ -352,19 +363,26 @@ async def main() -> None: # Start the runtime. self._runtime.start() - # Send a reset message to the group chat. - await self._runtime.publish_message( - GroupChatReset(), - topic_id=TopicId(type=self._group_topic_type, source=self._team_id), - ) - - # Stop the runtime. - await self._runtime.stop_when_idle() + try: + # Send a reset message to all participants. + for participant_topic_type in self._participant_topic_types: + await self._runtime.send_message( + GroupChatReset(), + recipient=AgentId(type=participant_topic_type, key=self._team_id), + ) + # Send a reset message to the group chat manager. + await self._runtime.send_message( + GroupChatReset(), + recipient=AgentId(type=self._group_chat_manager_topic_type, key=self._team_id), + ) + finally: + # Stop the runtime. + await self._runtime.stop_when_idle() - # Reset the output message queue. - self._stop_reason = None - while not self._output_message_queue.empty(): - self._output_message_queue.get_nowait() + # Reset the output message queue. + self._stop_reason = None + while not self._output_message_queue.empty(): + self._output_message_queue.get_nowait() - # Indicate that the team is no longer running. - self._is_running = False + # Indicate that the team is no longer running. + self._is_running = False diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py index e28f46a8c89d..d2a2b917690b 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_base_group_chat_manager.py @@ -2,10 +2,10 @@ from typing import Any, List from autogen_core.base import MessageContext -from autogen_core.components import DefaultTopicId, event +from autogen_core.components import DefaultTopicId, event, rpc from ...base import TerminationCondition -from ...messages import AgentMessage, StopMessage +from ...messages import AgentMessage, ChatMessage, StopMessage from ._events import ( GroupChatAgentResponse, GroupChatRequestPublish, @@ -55,7 +55,7 @@ def __init__( self._max_turns = max_turns self._current_turn = 0 - @event + @rpc async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> None: """Handle the start of a group chat by selecting a speaker to start the conversation.""" @@ -70,10 +70,16 @@ async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> No # Stop the group chat. return + # Validate the group state given the start message. + await self.validate_group_state(message.message) + if message.message is not None: # Log the start message. 
await self.publish_message(message, topic_id=DefaultTopicId(type=self._output_topic_type)) + # Relay the start message to the participants. + await self.publish_message(message, topic_id=DefaultTopicId(type=self._group_topic_type)) + # Append the user message to the message thread. self._message_thread.append(message.message) @@ -137,11 +143,16 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess speaker_topic_type = await self.select_speaker(self._message_thread) await self.publish_message(GroupChatRequestPublish(), topic_id=DefaultTopicId(type=speaker_topic_type)) - @event + @rpc async def handle_reset(self, message: GroupChatReset, ctx: MessageContext) -> None: # Reset the group chat manager. await self.reset() + @abstractmethod + async def validate_group_state(self, message: ChatMessage | None) -> None: + """Validate the state of the group chat given the start message. This is executed when the group chat manager receives a GroupChatStart event.""" + ... + @abstractmethod async def select_speaker(self, thread: List[AgentMessage]) -> str: """Select a speaker from the participants and return the diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py index d249676fda06..315708032865 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_chat_agent_container.py @@ -1,7 +1,7 @@ from typing import Any, List from autogen_core.base import MessageContext -from autogen_core.components import DefaultTopicId, event +from autogen_core.components import DefaultTopicId, event, rpc from ...base import ChatAgent, Response from ...messages import ChatMessage @@ -38,7 +38,7 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess """Handle an agent response event by appending the content to the buffer.""" self._message_buffer.append(message.agent_response.chat_message) - @event + @rpc async def handle_reset(self, message: GroupChatReset, ctx: MessageContext) -> None: """Handle a reset event by resetting the agent.""" self._message_buffer.clear() diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py index 5410673ebde2..ae1567a7daec 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_magentic_one/_magentic_one_orchestrator.py @@ -2,7 +2,7 @@ from typing import Any, List from autogen_core.base import MessageContext -from autogen_core.components import DefaultTopicId, Image, event +from autogen_core.components import DefaultTopicId, Image, event, rpc from autogen_core.components.models import ( AssistantMessage, ChatCompletionClient, @@ -102,7 +102,7 @@ def _get_task_ledger_plan_update_prompt(self, team: str) -> str: def _get_final_answer_prompt(self, task: str) -> str: return ORCHESTRATOR_FINAL_ANSWER_PROMPT.format(task=task) - @event + @rpc async def handle_start(self, message: GroupChatStart, ctx: MessageContext) -> None: """Handle the start of a group chat by selecting a speaker to start the 
conversation.""" assert message is not None and message.message is not None @@ -145,7 +145,7 @@ async def handle_agent_response(self, message: GroupChatAgentResponse, ctx: Mess self._message_thread.append(message.agent_response.chat_message) await self._orchestrate_step() - @event + @rpc async def handle_reset(self, message: GroupChatReset, ctx: MessageContext) -> None: # Reset the group chat manager. await self.reset() diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py index 8e3a262a85da..f5c128a6cc27 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_round_robin_group_chat.py @@ -1,7 +1,7 @@ from typing import Callable, List from ...base import ChatAgent, TerminationCondition -from ...messages import AgentMessage +from ...messages import AgentMessage, ChatMessage from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -28,6 +28,9 @@ def __init__( ) self._next_speaker_index = 0 + async def validate_group_state(self, message: ChatMessage | None) -> None: + pass + async def reset(self) -> None: self._current_turn = 0 self._message_thread.clear() @@ -68,7 +71,7 @@ class RoundRobinGroupChat(BaseGroupChat): from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_agentchat.task import TextMentionTermination + from autogen_agentchat.task import TextMentionTermination, Console async def main() -> None: @@ -84,9 +87,7 @@ async def get_weather(location: str) -> str: ) termination = TextMentionTermination("TERMINATE") team = RoundRobinGroupChat([assistant], termination_condition=termination) - stream = team.run_stream("What's the weather in New York?") - async for message in stream: - print(message) + await Console(team.run_stream(task="What's the weather in New York?")) asyncio.run(main()) @@ -99,7 +100,7 @@ async def get_weather(location: str) -> str: from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import RoundRobinGroupChat - from autogen_agentchat.task import TextMentionTermination + from autogen_agentchat.task import TextMentionTermination, Console async def main() -> None: @@ -109,9 +110,7 @@ async def main() -> None: agent2 = AssistantAgent("Assistant2", model_client=model_client) termination = TextMentionTermination("TERMINATE") team = RoundRobinGroupChat([agent1, agent2], termination_condition=termination) - stream = team.run_stream("Tell me some jokes.") - async for message in stream: - print(message) + await Console(team.run_stream(task="Tell me some jokes.")) asyncio.run(main()) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py index f618c0d3832c..cfcebd3783ce 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_selector_group_chat.py @@ -8,6 +8,7 @@ from ...base import ChatAgent, TerminationCondition from ...messages import ( 
AgentMessage, + ChatMessage, HandoffMessage, MultiModalMessage, StopMessage, @@ -53,6 +54,9 @@ def __init__( self._allow_repeated_speaker = allow_repeated_speaker self._selector_func = selector_func + async def validate_group_state(self, message: ChatMessage | None) -> None: + pass + async def reset(self) -> None: self._current_turn = 0 self._message_thread.clear() @@ -204,7 +208,7 @@ class SelectorGroupChat(BaseGroupChat): from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import SelectorGroupChat - from autogen_agentchat.task import TextMentionTermination + from autogen_agentchat.task import TextMentionTermination, Console async def main() -> None: @@ -243,9 +247,7 @@ async def book_trip() -> str: model_client=model_client, termination_condition=termination, ) - stream = team.run_stream("Book a 3-day trip to new york.") - async for message in stream: - print(message) + await Console(team.run_stream(task="Book a 3-day trip to new york.")) asyncio.run(main()) @@ -255,16 +257,18 @@ async def book_trip() -> str: .. code-block:: python import asyncio + from typing import Sequence from autogen_ext.models import OpenAIChatCompletionClient from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import SelectorGroupChat - from autogen_agentchat.task import TextMentionTermination + from autogen_agentchat.task import TextMentionTermination, Console + from autogen_agentchat.messages import AgentMessage async def main() -> None: model_client = OpenAIChatCompletionClient(model="gpt-4o") - def check_caculation(x: int, y: int, answer: int) -> str: + def check_calculation(x: int, y: int, answer: int) -> str: if x + y == answer: return "Correct!" else: @@ -279,12 +283,12 @@ def check_caculation(x: int, y: int, answer: int) -> str: agent2 = AssistantAgent( "Agent2", model_client, - tools=[check_caculation], + tools=[check_calculation], description="For checking calculation", system_message="Check the answer and respond with 'Correct!' or 'Incorrect!'", ) - def selector_func(messages): + def selector_func(messages: Sequence[AgentMessage]) -> str | None: if len(messages) == 1 or messages[-1].content == "Incorrect!": return "Agent1" if messages[-1].source == "Agent1": @@ -299,9 +303,7 @@ def selector_func(messages): termination_condition=termination, ) - stream = team.run_stream("What is 1 + 1?") - async for message in stream: - print(message) + await Console(team.run_stream(task="What is 1 + 1?")) asyncio.run(main()) diff --git a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py index 651367169b67..0e658ab75237 100644 --- a/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py +++ b/python/packages/autogen-agentchat/src/autogen_agentchat/teams/_group_chat/_swarm_group_chat.py @@ -3,7 +3,7 @@ from ... 
import EVENT_LOGGER_NAME from ...base import ChatAgent, TerminationCondition -from ...messages import AgentMessage, HandoffMessage +from ...messages import AgentMessage, ChatMessage, HandoffMessage from ._base_group_chat import BaseGroupChat from ._base_group_chat_manager import BaseGroupChatManager @@ -32,6 +32,31 @@ def __init__( ) self._current_speaker = participant_topic_types[0] + async def validate_group_state(self, message: ChatMessage | None) -> None: + """Validate the start message for the group chat.""" + # Check if the start message is a handoff message. + if isinstance(message, HandoffMessage): + if message.target not in self._participant_topic_types: + raise ValueError( + f"The target {message.target} is not one of the participants {self._participant_topic_types}. " + "If you are resuming Swarm with a new HandoffMessage, make sure its target is a valid participant." + ) + return + # Check if there is a handoff message in the thread that is not targeting a valid participant. + for existing_message in reversed(self._message_thread): + if isinstance(existing_message, HandoffMessage): + if existing_message.target not in self._participant_topic_types: + raise ValueError( + f"The existing handoff target {existing_message.target} is not one of the participants {self._participant_topic_types}. " + "If you are resuming Swarm with a new task, make sure to include in the task " + "a HandoffMessage that targets a valid participant. For example, if you are " + "resuming from a HandoffTermination, the new task should be a HandoffMessage " + "targeting a valid participant." + ) + # The latest handoff message should always target a valid participant. + # Do not look past the latest handoff message. + return + async def reset(self) -> None: self._current_turn = 0 self._message_thread.clear() @@ -47,13 +72,8 @@ async def select_speaker(self, thread: List[AgentMessage]) -> str: for message in reversed(thread): if isinstance(message, HandoffMessage): self._current_speaker = message.target - if self._current_speaker not in self._participant_topic_types: - raise ValueError( - f"The target {self._current_speaker} in the handoff message " - f"is not one of the participants {self._participant_topic_types}. " - "If you are resuming the Swarm with a new task make sure to include in your task " - "a handoff message with a valid participant as the target." - ) + # The latest handoff message should always target a valid participant. + assert self._current_speaker in self._participant_topic_types return self._current_speaker return self._current_speaker @@ -72,7 +92,7 @@ class Swarm(BaseGroupChat): Without a termination condition, the group chat will run indefinitely. max_turns (int, optional): The maximum number of turns in the group chat before stopping. Defaults to None, meaning no limit. - Examples: + Basic example: .. code-block:: python @@ -99,11 +119,49 @@ async def main() -> None: termination = MaxMessageTermination(3) team = Swarm([agent1, agent2], termination_condition=termination) - stream = team.run_stream("What is bob's birthday?") + stream = team.run_stream(task="What is bob's birthday?") async for message in stream: print(message) + asyncio.run(main()) + + + Using the :class:`~autogen_agentchat.task.HandoffTermination` for human-in-the-loop handoff: + + .. 
code-block:: python + + import asyncio + from autogen_ext.models import OpenAIChatCompletionClient + from autogen_agentchat.agents import AssistantAgent + from autogen_agentchat.teams import Swarm + from autogen_agentchat.task import HandoffTermination, Console, MaxMessageTermination + from autogen_agentchat.messages import HandoffMessage + + + async def main() -> None: + model_client = OpenAIChatCompletionClient(model="gpt-4o") + + agent = AssistantAgent( + "Alice", + model_client=model_client, + handoffs=["user"], + system_message="You are Alice and you only answer questions about yourself, ask the user for help if needed.", + ) + termination = HandoffTermination(target="user") | MaxMessageTermination(3) + team = Swarm([agent], termination_condition=termination) + + # Start the conversation. + await Console(team.run_stream(task="What is bob's birthday?")) + + # Resume with user feedback. + await Console( + team.run_stream( + task=HandoffMessage(source="user", target="Alice", content="Bob's birthday is on 1st January.") + ) + ) + + asyncio.run(main()) """ diff --git a/python/packages/autogen-agentchat/tests/test_group_chat.py b/python/packages/autogen-agentchat/tests/test_group_chat.py index db8bfa9d4167..7df27abcbcd5 100644 --- a/python/packages/autogen-agentchat/tests/test_group_chat.py +++ b/python/packages/autogen-agentchat/tests/test_group_chat.py @@ -24,7 +24,7 @@ ToolCallMessage, ToolCallResultMessage, ) -from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination +from autogen_agentchat.task import Console, HandoffTermination, MaxMessageTermination, TextMentionTermination from autogen_agentchat.teams import ( RoundRobinGroupChat, SelectorGroupChat, @@ -315,6 +315,14 @@ async def test_round_robin_group_chat_with_tools(monkeypatch: pytest.MonkeyPatch assert message == result.messages[index] index += 1 + # Test Console. + tool_use_agent._model_context.clear() # pyright: ignore + mock.reset() + index = 0 + await team.reset() + result2 = await Console(team.run_stream(task="Write a program that prints 'Hello, world!'")) + assert result2 == result + @pytest.mark.asyncio async def test_round_robin_group_chat_with_resume_and_reset() -> None: @@ -476,6 +484,14 @@ async def test_selector_group_chat(monkeypatch: pytest.MonkeyPatch) -> None: assert message == result.messages[index] index += 1 + # Test Console. + mock.reset() + agent1._count = 0 # pyright: ignore + index = 0 + await team.reset() + result2 = await Console(team.run_stream(task="Write a program that prints 'Hello, world!'")) + assert result2 == result + @pytest.mark.asyncio async def test_selector_group_chat_two_speakers(monkeypatch: pytest.MonkeyPatch) -> None: @@ -528,6 +544,14 @@ async def test_selector_group_chat_two_speakers(monkeypatch: pytest.MonkeyPatch) assert message == result.messages[index] index += 1 + # Test Console. + mock.reset() + agent1._count = 0 # pyright: ignore + index = 0 + await team.reset() + result2 = await Console(team.run_stream(task="Write a program that prints 'Hello, world!'")) + assert result2 == result + @pytest.mark.asyncio async def test_selector_group_chat_two_speakers_allow_repeated(monkeypatch: pytest.MonkeyPatch) -> None: @@ -595,6 +619,13 @@ async def test_selector_group_chat_two_speakers_allow_repeated(monkeypatch: pyte assert message == result.messages[index] index += 1 + # Test Console. 
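+        # Console consumes the message stream, prints each message, and returns the final TaskResult,
+        # so the streamed run should produce the same result as the run above.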
+ mock.reset() + index = 0 + await team.reset() + result2 = await Console(team.run_stream(task="Write a program that prints 'Hello, world!'")) + assert result2 == result + @pytest.mark.asyncio async def test_selector_group_chat_custom_selector(monkeypatch: pytest.MonkeyPatch) -> None: @@ -792,6 +823,14 @@ async def test_swarm_handoff_using_tool_calls(monkeypatch: pytest.MonkeyPatch) - assert message == result.messages[index] index += 1 + # Test Console + agent1._model_context.clear() # pyright: ignore + mock.reset() + index = 0 + await team.reset() + result2 = await Console(team.run_stream(task="task")) + assert result2 == result + @pytest.mark.asyncio async def test_swarm_pause_and_resume() -> None: @@ -815,3 +854,56 @@ async def test_swarm_pause_and_resume() -> None: result = await team.run() assert len(result.messages) == 1 assert result.messages[0].content == "Transferred to second_agent." + + +@pytest.mark.asyncio +async def test_swarm_with_handoff_termination() -> None: + first_agent = _HandOffAgent("first_agent", description="first agent", next_agent="second_agent") + second_agent = _HandOffAgent("second_agent", description="second agent", next_agent="third_agent") + third_agent = _HandOffAgent("third_agent", description="third agent", next_agent="first_agent") + + # Handoff to an existing agent. + termination = HandoffTermination(target="third_agent") + team = Swarm([second_agent, first_agent, third_agent], termination_condition=termination) + # Start + result = await team.run(task="task") + assert len(result.messages) == 2 + assert result.messages[0].content == "task" + assert result.messages[1].content == "Transferred to third_agent." + # Resume existing. + result = await team.run() + assert len(result.messages) == 3 + assert result.messages[0].content == "Transferred to first_agent." + assert result.messages[1].content == "Transferred to second_agent." + assert result.messages[2].content == "Transferred to third_agent." + # Resume new task. + result = await team.run(task="new task") + assert len(result.messages) == 4 + assert result.messages[0].content == "new task" + assert result.messages[1].content == "Transferred to first_agent." + assert result.messages[2].content == "Transferred to second_agent." + assert result.messages[3].content == "Transferred to third_agent." + + # Handoff to a non-existing agent. + third_agent = _HandOffAgent("third_agent", description="third agent", next_agent="non_existing_agent") + termination = HandoffTermination(target="non_existing_agent") + team = Swarm([second_agent, first_agent, third_agent], termination_condition=termination) + # Start + result = await team.run(task="task") + assert len(result.messages) == 3 + assert result.messages[0].content == "task" + assert result.messages[1].content == "Transferred to third_agent." + assert result.messages[2].content == "Transferred to non_existing_agent." + # Attempt to resume. + with pytest.raises(ValueError): + await team.run() + # Attempt to resume with a new task. + with pytest.raises(ValueError): + await team.run(task="new task") + # Resume with a HandoffMessage + result = await team.run(task=HandoffMessage(content="Handoff to first_agent.", target="first_agent", source="user")) + assert len(result.messages) == 4 + assert result.messages[0].content == "Handoff to first_agent." + assert result.messages[1].content == "Transferred to second_agent." + assert result.messages[2].content == "Transferred to third_agent." + assert result.messages[3].content == "Transferred to non_existing_agent." 
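The termination tests that follow exercise the new `ExternalTermination` and `SourceMatchTermination` conditions. As a quick orientation, here is a minimal sketch of how `ExternalTermination` is meant to be driven from outside a run; it is not part of the diff, and it uses only the calls exercised in the tests below (calling the condition with an empty message list stands in for the team's periodic termination check):

```python
import asyncio

from autogen_agentchat.task import ExternalTermination


async def main() -> None:
    termination = ExternalTermination()

    # Nothing has requested a stop yet, so the condition does not fire.
    assert await termination([]) is None
    assert not termination.terminated

    # Some external event (e.g., a cancel button in a UI) flips the switch...
    termination.set()

    # ...and the next check returns a non-None stop signal, ending the run.
    assert await termination([]) is not None
    assert termination.terminated

    # Reset before reusing the condition for another run.
    await termination.reset()
    assert await termination([]) is None


asyncio.run(main())
```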
diff --git a/python/packages/autogen-agentchat/tests/test_termination_condition.py b/python/packages/autogen-agentchat/tests/test_termination_condition.py index a56d8df356cc..ec6ff43e00ce 100644 --- a/python/packages/autogen-agentchat/tests/test_termination_condition.py +++ b/python/packages/autogen-agentchat/tests/test_termination_condition.py @@ -1,10 +1,13 @@ import asyncio import pytest +from autogen_agentchat.base import TerminatedException from autogen_agentchat.messages import HandoffMessage, StopMessage, TextMessage from autogen_agentchat.task import ( + ExternalTermination, HandoffTermination, MaxMessageTermination, + SourceMatchTermination, StopMessageTermination, TextMentionTermination, TimeoutTermination, @@ -226,3 +229,41 @@ async def test_timeout_termination() -> None: assert await termination([TextMessage(content="Hello", source="user")]) is None await asyncio.sleep(0.2) assert await termination([TextMessage(content="World", source="user")]) is not None + + +@pytest.mark.asyncio +async def test_external_termination() -> None: + termination = ExternalTermination() + + assert await termination([]) is None + assert not termination.terminated + + termination.set() + assert await termination([]) is not None + assert termination.terminated + + await termination.reset() + assert await termination([]) is None + + +@pytest.mark.asyncio +async def test_source_match_termination() -> None: + termination = SourceMatchTermination(sources=["Assistant"]) + assert await termination([]) is None + + continue_messages = [TextMessage(content="Hello", source="agent"), TextMessage(content="Hello", source="user")] + assert await termination(continue_messages) is None + + terminate_messages = [ + TextMessage(content="Hello", source="agent"), + TextMessage(content="Hello", source="Assistant"), + TextMessage(content="Hello", source="user"), + ] + result = await termination(terminate_messages) + assert isinstance(result, StopMessage) + assert termination.terminated + + with pytest.raises(TerminatedException): + await termination([]) + await termination.reset() + assert not termination.terminated diff --git a/python/packages/autogen-agentchat/tests/test_userproxy_agent.py b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py new file mode 100644 index 000000000000..2ef3053f09bf --- /dev/null +++ b/python/packages/autogen-agentchat/tests/test_userproxy_agent.py @@ -0,0 +1,103 @@ +import asyncio +from typing import Optional, Sequence + +import pytest +from autogen_agentchat.agents import UserProxyAgent +from autogen_agentchat.base import Response +from autogen_agentchat.messages import ChatMessage, HandoffMessage, TextMessage +from autogen_core.base import CancellationToken + + +@pytest.mark.asyncio +async def test_basic_input() -> None: + """Test basic message handling with custom input""" + + def custom_input(prompt: str) -> str: + return "The height of the eiffel tower is 324 meters. Aloha!" + + agent = UserProxyAgent(name="test_user", input_func=custom_input) + messages = [TextMessage(content="What is the height of the eiffel tower?", source="assistant")] + + response = await agent.on_messages(messages, CancellationToken()) + + assert isinstance(response, Response) + assert isinstance(response.chat_message, TextMessage) + assert response.chat_message.content == "The height of the eiffel tower is 324 meters. Aloha!" 
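+    # The agent should echo the custom input function's return value as its own TextMessage.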
+    assert response.chat_message.source == "test_user"
+
+
+@pytest.mark.asyncio
+async def test_async_input() -> None:
+    """Test handling of async input function"""
+
+    async def async_input(prompt: str, token: Optional[CancellationToken] = None) -> str:
+        await asyncio.sleep(0.1)
+        return "async response"
+
+    agent = UserProxyAgent(name="test_user", input_func=async_input)
+    messages = [TextMessage(content="test prompt", source="assistant")]
+
+    response = await agent.on_messages(messages, CancellationToken())
+
+    assert isinstance(response.chat_message, TextMessage)
+    assert response.chat_message.content == "async response"
+    assert response.chat_message.source == "test_user"
+
+
+@pytest.mark.asyncio
+async def test_handoff_handling() -> None:
+    """Test handling of handoff messages"""
+
+    def custom_input(prompt: str) -> str:
+        return "handoff response"
+
+    agent = UserProxyAgent(name="test_user", input_func=custom_input)
+
+    messages: Sequence[ChatMessage] = [
+        TextMessage(content="Initial message", source="assistant"),
+        HandoffMessage(content="Handing off to user for confirmation", source="assistant", target="test_user"),
+    ]
+
+    response = await agent.on_messages(messages, CancellationToken())
+
+    assert isinstance(response.chat_message, HandoffMessage)
+    assert response.chat_message.content == "handoff response"
+    assert response.chat_message.source == "test_user"
+    assert response.chat_message.target == "assistant"
+
+
+@pytest.mark.asyncio
+async def test_cancellation() -> None:
+    """Test cancellation during message handling"""
+
+    async def cancellable_input(prompt: str, token: Optional[CancellationToken] = None) -> str:
+        await asyncio.sleep(0.1)
+        if token and token.is_cancelled():
+            raise asyncio.CancelledError()
+        return "cancellable response"
+
+    agent = UserProxyAgent(name="test_user", input_func=cancellable_input)
+    messages = [TextMessage(content="test prompt", source="assistant")]
+    token = CancellationToken()
+
+    async def cancel_after_delay() -> None:
+        await asyncio.sleep(0.05)
+        token.cancel()
+
+    with pytest.raises(asyncio.CancelledError):
+        await asyncio.gather(agent.on_messages(messages, token), cancel_after_delay())
+
+
+@pytest.mark.asyncio
+async def test_error_handling() -> None:
+    """Test error handling with problematic input function"""
+
+    def failing_input(_: str) -> str:
+        raise ValueError("Input function failed")
+
+    agent = UserProxyAgent(name="test_user", input_func=failing_input)
+    messages = [TextMessage(content="test prompt", source="assistant")]
+
+    with pytest.raises(RuntimeError) as exc_info:
+        await agent.on_messages(messages, CancellationToken())
+    assert "Failed to get user input" in str(exc_info.value)
diff --git a/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio b/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio
new file mode 100644
index 000000000000..ad363bfd7fa1
--- /dev/null
+++ b/python/packages/autogen-core/docs/drawio/selector-group-chat.drawio
@@ -0,0 +1,52 @@
+[52 lines of draw.io XML for the selector group chat diagram; the XML content was not captured in this extraction]
diff --git a/python/packages/autogen-core/docs/drawio/swarm_customer_support.drawio b/python/packages/autogen-core/docs/drawio/swarm_customer_support.drawio
new file mode 100644
index 000000000000..798b921cd5ef
--- /dev/null
+++ b/python/packages/autogen-core/docs/drawio/swarm_customer_support.drawio
@@ -0,0 +1,56 @@
+[56 lines of draw.io XML for the swarm customer support diagram; the XML content was not captured in this extraction]
diff --git a/python/packages/autogen-core/docs/drawio/swarm_stock_research.drawio b/python/packages/autogen-core/docs/drawio/swarm_stock_research.drawio
new file mode 100644
index 000000000000..83d699e5decd
--- /dev/null
+++ b/python/packages/autogen-core/docs/drawio/swarm_stock_research.drawio
@@ -0,0 +1,73 @@
+[73 lines of draw.io XML for the swarm stock research diagram; the XML content was not captured in this extraction]
diff --git a/python/packages/autogen-core/docs/src/_extension/code_lint.py b/python/packages/autogen-core/docs/src/_extension/code_lint.py
new file mode 100644
index 000000000000..b8c01bfd069f
--- /dev/null
+++ b/python/packages/autogen-core/docs/src/_extension/code_lint.py
@@ -0,0 +1,98 @@
+# Modified from: https://github.com/kai687/sphinxawesome-codelinter
+
+import tempfile
+from typing import AbstractSet, Any, Iterable
+
+from docutils import nodes
+from sphinx.application import Sphinx
+from sphinx.builders import Builder
+from sphinx.util import logging
+from sphinx.util.console import darkgreen, darkred, red, teal, faint  # type: ignore[attr-defined]
+
+from pygments import highlight  # type: ignore
+from pygments.lexers import PythonLexer
+from pygments.formatters import TerminalFormatter
+
+logger = logging.getLogger(__name__)
+
+__version__ = "0.1.0"
+
+
+class CodeLinter(Builder):
+    """Iterate over all ``literal_block`` nodes and pipe them into any
+    command line tool that can read from standard input.
+    """
+
+    name = "code_lint"
+    allow_parallel = True
+
+    def init(self) -> None:
+        """Initialize."""
+        self._had_errors = False
+
+    def get_outdated_docs(self) -> str | Iterable[str]:
+        """Check for outdated files.
+
+        Return an iterable of outdated output files, or a string describing what an
+        update will build.
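+        Here we return all found documents so that every code block is re-checked on each build.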
+ """ + return self.env.found_docs + + def get_target_uri(self, docname: str, typ: str | None = None) -> str: + """Return Target URI for a document name.""" + return "" + + def prepare_writing(self, docnames: AbstractSet[str]) -> None: + """Run these steps before documents are written.""" + return + + def write_doc(self, docname: str, doctree: nodes.Node) -> None: + path_prefix: str = self.app.config.code_lint_path_prefix + supported_languages = set(["python"]) + + if not docname.startswith(path_prefix): + return + + for code in doctree.findall(nodes.literal_block): + if code["language"] in supported_languages: + logger.info("Checking a code block in %s...", docname, nonl=True) + if "ignore" in code["classes"]: + logger.info(" " + darkgreen("OK[ignored]")) + continue + + # Create a temporary file to store the code block + with tempfile.NamedTemporaryFile(mode="wb", suffix=".py") as temp_file: + temp_file.write(code.astext().encode()) + temp_file.flush() + + # Run pyright on the temporary file using subprocess.run + import subprocess + + result = subprocess.run(["pyright", temp_file.name], capture_output=True, text=True) + if result.returncode != 0: + logger.info(" " + darkred("FAIL")) + highlighted_code = highlight(code.astext(), PythonLexer(), TerminalFormatter()) # type: ignore + output = f"{faint('========================================================')}\n{red('Error')}: Pyright found issues in {teal(docname)}:\n{faint('--------------------------------------------------------')}\n{highlighted_code}\n{faint('--------------------------------------------------------')}\n\n{teal('pyright output:')}\n{red(result.stdout)}{faint('========================================================')}\n" + logger.info(output) + self._had_errors = True + else: + logger.info(" " + darkgreen("OK")) + + def finish(self) -> None: + """Finish the build process.""" + if self._had_errors: + raise RuntimeError("Code linting failed - see earlier output") + + +def setup(app: Sphinx) -> dict[str, Any]: + app.add_builder(CodeLinter) + app.add_config_value("code_lint_path_prefix", "", "env") + + return { + "version": __version__, + "parallel_read_safe": True, + "parallel_write_safe": True, + } diff --git a/python/packages/autogen-core/docs/src/conf.py b/python/packages/autogen-core/docs/src/conf.py index 11341873d141..9852428ae436 100644 --- a/python/packages/autogen-core/docs/src/conf.py +++ b/python/packages/autogen-core/docs/src/conf.py @@ -37,7 +37,8 @@ "sphinx_copybutton", "_extension.gallery_directive", "myst_nb", - "sphinxcontrib.autodoc_pydantic" + "sphinxcontrib.autodoc_pydantic", + "_extension.code_lint", ] suppress_warnings = ["myst.header"] @@ -148,6 +149,14 @@ intersphinx_mapping = {"python": ("https://docs.python.org/3", None)} +code_lint_path_prefix = "reference/python" + +nb_mime_priority_overrides = [ + ('code_lint', 'image/jpeg', 100), + ('code_lint', 'image/png', 100), + ('code_lint', 'text/plain', 100) +] + def setup_to_main( app: Sphinx, pagename: str, templatename: str, context, doctree diff --git a/python/packages/autogen-core/docs/src/index.md b/python/packages/autogen-core/docs/src/index.md index 674492e3f231..bee058bd9124 100644 --- a/python/packages/autogen-core/docs/src/index.md +++ b/python/packages/autogen-core/docs/src/index.md @@ -61,7 +61,7 @@ AgentChat High-level API that includes preset agents and teams for building multi-agent systems. 
```sh -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' ``` 💡 *Start here if you are looking for an API similar to AutoGen 0.2* @@ -82,7 +82,7 @@ Get Started Provides building blocks for creating asynchronous, event driven multi-agent systems. ```sh -pip install 'autogen-core==0.4.0.dev6' +pip install 'autogen-core==0.4.0.dev7' ``` +++ diff --git a/python/packages/autogen-core/docs/src/packages/index.md b/python/packages/autogen-core/docs/src/packages/index.md index dd86c4c95122..b659482e0d67 100644 --- a/python/packages/autogen-core/docs/src/packages/index.md +++ b/python/packages/autogen-core/docs/src/packages/index.md @@ -31,10 +31,10 @@ myst: Library that is at a similar level of abstraction as AutoGen 0.2, including default agents and group chat. ```sh -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' ``` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/agentchat-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_agentchat/autogen_agentchat.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-agentchat/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-agentchat) ::: (pkg-info-autogen-core)= @@ -46,10 +46,10 @@ pip install 'autogen-agentchat==0.4.0.dev6' Implements the core functionality of the AutoGen framework, providing basic building blocks for creating multi-agent systems. ```sh -pip install 'autogen-core==0.4.0.dev6' +pip install 'autogen-core==0.4.0.dev7' ``` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/core-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_core/autogen_core.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-core/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-core) ::: (pkg-info-autogen-ext)= @@ -61,7 +61,7 @@ pip install 'autogen-core==0.4.0.dev6' Implementations of core components that interface with external services, or use extra dependencies. For example, Docker based code execution. 
```sh -pip install 'autogen-ext==0.4.0.dev6' +pip install 'autogen-ext==0.4.0.dev7' ``` Extras: @@ -71,7 +71,7 @@ Extras: - `docker` needed for {py:class}`~autogen_ext.code_executors.DockerCommandLineCodeExecutor` - `openai` needed for {py:class}`~autogen_ext.models.OpenAIChatCompletionClient` -[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev6/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext) +[{fas}`circle-info;pst-color-primary` User Guide](/user-guide/extensions-user-guide/index.md) | [{fas}`file-code;pst-color-primary` API Reference](/reference/python/autogen_ext/autogen_ext.rst) | [{fab}`python;pst-color-primary` PyPI](https://pypi.org/project/autogen-ext/0.4.0.dev7/) | [{fab}`github;pst-color-primary` Source](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-ext) ::: (pkg-info-autogen-magentic-one)= diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md index ba3826fe2c37..74116bc39fa5 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/installation.md @@ -61,7 +61,11 @@ Install the `autogen-agentchat` package using pip: ```bash -pip install 'autogen-agentchat==0.4.0.dev6' +pip install 'autogen-agentchat==0.4.0.dev7' +``` + +```{note} +Python 3.10 or later is required. ``` ## Install OpenAI for Model Client @@ -70,7 +74,7 @@ To use the OpenAI and Azure OpenAI models, you need to install the following extensions: ```bash -pip install 'autogen-ext[openai]==0.4.0.dev6' +pip install 'autogen-ext[openai]==0.4.0.dev7' ``` ## Install Docker for Code Execution diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb index 061284f78004..9e946a47340b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/quickstart.ipynb @@ -37,7 +37,7 @@ }, "outputs": [], "source": [ - "pip install 'autogen-agentchat==0.4.0.dev6' 'autogen-ext[openai]==0.4.0.dev6'" + "pip install 'autogen-agentchat==0.4.0.dev7' 'autogen-ext[openai]==0.4.0.dev7'" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb index b39c6c8296a3..9fa0b845fc26 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/agents.ipynb @@ -29,7 +29,7 @@ }, { "cell_type": "code", - "execution_count": 11, + "execution_count": 1, "metadata": {}, "outputs": [], "source": [ @@ -107,31 +107,55 @@ "### Stream Messages\n", "\n", "We can also stream each message as it is generated by the agent by using the\n", - "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method." 
+ "{py:meth}`~autogen_agentchat.agents.AssistantAgent.on_messages_stream` method,\n", + "and use {py:class}`~autogen_agentchat.task.Console` to print the messages\n", + "as they appear to the console." ] }, { "cell_type": "code", - "execution_count": 12, + "execution_count": 2, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "source='assistant' models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15) content=[FunctionCall(id='call_fXhM4PeZsodhhUOlNiFkoBXF', arguments='{\"query\":\"AutoGen\"}', name='web_search')]\n", - "source='assistant' models_usage=None content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fXhM4PeZsodhhUOlNiFkoBXF')]\n", - "Response(chat_message=TextMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=92, completion_tokens=31), content='AutoGen is a programming framework designed for building multi-agent applications. If you need more specific information about its features or usage, feel free to ask!'), inner_messages=[ToolCallMessage(source='assistant', models_usage=RequestUsage(prompt_tokens=61, completion_tokens=15), content=[FunctionCall(id='call_fXhM4PeZsodhhUOlNiFkoBXF', arguments='{\"query\":\"AutoGen\"}', name='web_search')]), ToolCallResultMessage(source='assistant', models_usage=None, content=[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fXhM4PeZsodhhUOlNiFkoBXF')])])\n" + "---------- assistant ----------\n", + "[FunctionCall(id='call_fSp5iTGVm2FKw5NIvfECSqNd', arguments='{\"query\":\"AutoGen information\"}', name='web_search')]\n", + "[Prompt tokens: 61, Completion tokens: 16]\n", + "---------- assistant ----------\n", + "[FunctionExecutionResult(content='AutoGen is a programming framework for building multi-agent applications.', call_id='call_fSp5iTGVm2FKw5NIvfECSqNd')]\n", + "---------- assistant ----------\n", + "AutoGen is a programming framework designed for building multi-agent applications. 
If you need more detailed information or specific aspects about AutoGen, feel free to ask!\n", + "[Prompt tokens: 93, Completion tokens: 32]\n", + "---------- Summary ----------\n", + "Number of inner messages: 2\n", + "Total prompt tokens: 154\n", + "Total completion tokens: 48\n", + "Duration: 4.30 seconds\n" ] } ], "source": [ + "from autogen_agentchat.task import Console\n", + "\n", + "\n", "async def assistant_run_stream() -> None:\n", - " async for message in agent.on_messages_stream(\n", - " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", - " cancellation_token=CancellationToken(),\n", - " ):\n", - " print(message)\n", + " # Option 1: read each message from the stream.\n", + " # async for message in agent.on_messages_stream(\n", + " # [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " # cancellation_token=CancellationToken(),\n", + " # ):\n", + " # print(message)\n", + "\n", + " # Option 2: use Console to print all messages as they appear.\n", + " await Console(\n", + " agent.on_messages_stream(\n", + " [TextMessage(content=\"Find information on AutoGen\", source=\"user\")],\n", + " cancellation_token=CancellationToken(),\n", + " )\n", + " )\n", "\n", "\n", "# Use asyncio.run(assistant_run_stream()) when running in a script.\n", @@ -158,6 +182,29 @@ "For more information on tool calling, refer to the documentation from [OpenAI](https://platform.openai.com/docs/guides/function-calling) and [Anthropic](https://docs.anthropic.com/en/docs/build-with-claude/tool-use)." ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Other Preset Agents\n", + "\n", + "The following preset agents are available:\n", + "\n", + "- {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`: An agent that can execute code.\n", + "- {py:class}`~autogen_ext.agents.OpenAIAssistantAgent`: An agent that is backed by an OpenAI Assistant, with ability to use custom tools.\n", + "- {py:class}`~autogen_ext.agents.MultimodalWebSurfer`: A multi-modal agent that can search the web and visit web pages for information." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Next Step\n", + "\n", + "Now we have discussed how to use the {py:class}`~autogen_agentchat.agents.AssistantAgent`,\n", + "we can move on to the next section to learn how to use the teams feature of AgentChat." + ] + }, { "cell_type": "markdown", "metadata": {}, @@ -186,246 +233,6 @@ ")\n", "print(result) -->" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## CodeExecutorAgent\n", - "\n", - "The {py:class}`~autogen_agentchat.agents.CodeExecutorAgent`\n", - "preset extracts and executes code snippets found in received messages and returns the output. It is typically used within a team with another agent that generates code snippets to be executed.\n", - "\n", - "```{note}\n", - "It is recommended that the {py:class}`~autogen_agentchat.agents.CodeExecutorAgent` agent\n", - "uses a Docker container to execute code. This ensures that model-generated code is executed in an isolated environment. To use Docker, your environment must have Docker installed and running. \n", - "Follow the installation instructions for [Docker](https://docs.docker.com/get-docker/).\n", - "```\n", - "\n", - "In this example, we show how to set up a {py:class}`~autogen_agentchat.agents.CodeExecutorAgent` agent that uses the \n", - "{py:class}`~autogen_ext.code_executors.DockerCommandLineCodeExecutor` \n", - "to execute code snippets in a Docker container. 
The `work_dir` parameter indicates where all executed files are first saved locally before being executed in the Docker container." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "source='code_executor' models_usage=None content='Hello world\\n'\n" - ] - } - ], - "source": [ - "from autogen_agentchat.agents import CodeExecutorAgent\n", - "from autogen_ext.code_executors import DockerCommandLineCodeExecutor\n", - "\n", - "\n", - "async def run_code_executor_agent() -> None:\n", - " # Create a code executor agent that uses a Docker container to execute code.\n", - " code_executor = DockerCommandLineCodeExecutor(work_dir=\"coding\")\n", - " await code_executor.start()\n", - " code_executor_agent = CodeExecutorAgent(\"code_executor\", code_executor=code_executor)\n", - "\n", - " # Run the agent with a given code snippet.\n", - " task = TextMessage(\n", - " content=\"\"\"Here is some code\n", - "```python\n", - "print('Hello world')\n", - "```\n", - "\"\"\",\n", - " source=\"user\",\n", - " )\n", - " response = await code_executor_agent.on_messages([task], CancellationToken())\n", - " print(response.chat_message)\n", - "\n", - " # Stop the code executor.\n", - " await code_executor.stop()\n", - "\n", - "\n", - "# Use asyncio.run(run_code_executor_agent()) when running in a script.\n", - "await run_code_executor_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "This example shows the agent executing a code snippet that prints \"Hello world\".\n", - "The agent then returns the output of the code execution." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Build Your Own Agents\n", - "\n", - "You may have agents with behaviors that do not fall into a preset. \n", - "In such cases, you can build custom agents.\n", - "\n", - "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", - "class and implement the following abstract methods and attributes:\n", - "\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", - "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. This method is called when the agent is asked to reset itself.\n", - "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n", - "\n", - "Optionally, you can implement the the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n", - "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n", - "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n", - "yields all messages in the response." 
- ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### CounterDownAgent\n", - "\n", - "In this example, we create a simple agent that counts down from a given number to zero,\n", - "and produces a stream of messages with the current count." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "3...\n", - "2...\n", - "1...\n", - "Done!\n" - ] - } - ], - "source": [ - "from typing import AsyncGenerator, List, Sequence\n", - "\n", - "from autogen_core.base import CancellationToken\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage\n", - "\n", - "\n", - "class CountDownAgent(BaseChatAgent):\n", - " def __init__(self, name: str, count: int = 3):\n", - " super().__init__(name, \"A simple agent that counts down.\")\n", - " self._count = count\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " # Calls the on_messages_stream.\n", - " response: Response | None = None\n", - " async for message in self.on_messages_stream(messages, cancellation_token):\n", - " if isinstance(message, Response):\n", - " response = message\n", - " assert response is not None\n", - " return response\n", - "\n", - " async def on_messages_stream(\n", - " self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n", - " ) -> AsyncGenerator[AgentMessage | Response, None]:\n", - " inner_messages: List[AgentMessage] = []\n", - " for i in range(self._count, 0, -1):\n", - " msg = TextMessage(content=f\"{i}...\", source=self.name)\n", - " inner_messages.append(msg)\n", - " yield msg\n", - " # The response is returned at the end of the stream.\n", - " # It contains the final message and all the inner messages.\n", - " yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_countdown_agent() -> None:\n", - " # Create a countdown agent.\n", - " countdown_agent = CountDownAgent(\"countdown\")\n", - "\n", - " # Run the agent with a given task and stream the response.\n", - " async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n", - " if isinstance(message, Response):\n", - " print(message.chat_message.content)\n", - " else:\n", - " print(message.content)\n", - "\n", - "\n", - "# Use asyncio.run(run_countdown_agent()) when running in a script.\n", - "await run_countdown_agent()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "### UserProxyAgent \n", - "\n", - "A common use case for building a custom agent is to create an agent that acts as a proxy for the user.\n", - "\n", - "In the example below we show how to implement a `UserProxyAgent` - an agent that asks the user to enter\n", - "some text through console and then returns that message as a response." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "I am glad to be here.\n" - ] - } - ], - "source": [ - "import asyncio\n", - "from typing import List, Sequence\n", - "\n", - "from autogen_core.base import CancellationToken\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.agents import BaseChatAgent\n", - "from autogen_agentchat.messages import ChatMessage, TextMessage\n", - "\n", - "\n", - "class UserProxyAgent(BaseChatAgent):\n", - " def __init__(self, name: str) -> None:\n", - " super().__init__(name, \"A human user.\")\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " user_input = await asyncio.get_event_loop().run_in_executor(None, input, \"Enter your response: \")\n", - " return Response(chat_message=TextMessage(content=user_input, source=self.name))\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass\n", - "\n", - "\n", - "async def run_user_proxy_agent() -> None:\n", - " user_proxy_agent = UserProxyAgent(name=\"user_proxy_agent\")\n", - " response = await user_proxy_agent.on_messages([], CancellationToken())\n", - " print(response.chat_message.content)\n", - "\n", - "\n", - "# Use asyncio.run(run_user_proxy_agent()) when running in a script.\n", - "await run_user_proxy_agent()" - ] } ], "metadata": { @@ -444,7 +251,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.5" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb new file mode 100644 index 000000000000..7c90a04440b6 --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/custom-agents.ipynb @@ -0,0 +1,166 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Custom Agents\n", + "\n", + "You may have agents with behaviors that do not fall into a preset. \n", + "In such cases, you can build custom agents.\n", + "\n", + "All agents in AgentChat inherit from {py:class}`~autogen_agentchat.agents.BaseChatAgent` \n", + "class and implement the following abstract methods and attributes:\n", + "\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages`: The abstract method that defines the behavior of the agent in response to messages. This method is called when the agent is asked to provide a response in {py:meth}`~autogen_agentchat.agents.BaseChatAgent.run`. It returns a {py:class}`~autogen_agentchat.base.Response` object.\n", + "- {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_reset`: The abstract method that resets the agent to its initial state. 
This method is called when the agent is asked to reset itself.\n",
+    "- {py:attr}`~autogen_agentchat.agents.BaseChatAgent.produced_message_types`: The list of possible {py:class}`~autogen_agentchat.messages.ChatMessage` message types the agent can produce in its response.\n",
+    "\n",
+    "Optionally, you can implement the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream` method to stream messages as they are generated by the agent. If this method is not implemented, the agent\n",
+    "uses the default implementation of {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages_stream`\n",
+    "that calls the {py:meth}`~autogen_agentchat.agents.BaseChatAgent.on_messages` method and\n",
+    "yields all messages in the response."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## CountDownAgent\n",
+    "\n",
+    "In this example, we create a simple agent that counts down from a given number to zero,\n",
+    "and produces a stream of messages with the current count."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from typing import AsyncGenerator, List, Sequence\n",
+    "\n",
+    "from autogen_agentchat.agents import BaseChatAgent\n",
+    "from autogen_agentchat.base import Response\n",
+    "from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage\n",
+    "from autogen_core.base import CancellationToken\n",
+    "\n",
+    "\n",
+    "class CountDownAgent(BaseChatAgent):\n",
+    "    def __init__(self, name: str, count: int = 3):\n",
+    "        super().__init__(name, \"A simple agent that counts down.\")\n",
+    "        self._count = count\n",
+    "\n",
+    "    @property\n",
+    "    def produced_message_types(self) -> List[type[ChatMessage]]:\n",
+    "        return [TextMessage]\n",
+    "\n",
+    "    async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n",
+    "        # Calls the on_messages_stream.\n",
+    "        response: Response | None = None\n",
+    "        async for message in self.on_messages_stream(messages, cancellation_token):\n",
+    "            if isinstance(message, Response):\n",
+    "                response = message\n",
+    "        assert response is not None\n",
+    "        return response\n",
+    "\n",
+    "    async def on_messages_stream(\n",
+    "        self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken\n",
+    "    ) -> AsyncGenerator[AgentMessage | Response, None]:\n",
+    "        inner_messages: List[AgentMessage] = []\n",
+    "        for i in range(self._count, 0, -1):\n",
+    "            msg = TextMessage(content=f\"{i}...\", source=self.name)\n",
+    "            inner_messages.append(msg)\n",
+    "            yield msg\n",
+    "        # The response is returned at the end of the stream.\n",
+    "        # It contains the final message and all the inner messages.\n",
+    "        yield Response(chat_message=TextMessage(content=\"Done!\", source=self.name), inner_messages=inner_messages)\n",
+    "\n",
+    "    async def on_reset(self, cancellation_token: CancellationToken) -> None:\n",
+    "        pass\n",
+    "\n",
+    "\n",
+    "async def run_countdown_agent() -> None:\n",
+    "    # Create a countdown agent.\n",
+    "    countdown_agent = CountDownAgent(\"countdown\")\n",
+    "\n",
+    "    # Run the agent with a given task and stream the response.\n",
+    "    async for message in countdown_agent.on_messages_stream([], CancellationToken()):\n",
+    "        if isinstance(message, Response):\n",
+    "            print(message.chat_message.content)\n",
+    "        else:\n",
+    "            print(message.content)\n",
+    "\n",
+    "\n",
+    "# Use asyncio.run(run_countdown_agent()) when running in a script.\n",
+    "await run_countdown_agent()"
+   ]
+  },
+  {
"cell_type": "markdown", + "metadata": {}, + "source": [ + "## UserProxyAgent \n", + "\n", + "A common use case for building a custom agent is to create an agent that acts as a proxy for the user.\n", + "\n", + "In the example below we show how to implement a `UserProxyAgent` - an agent that asks the user to enter\n", + "some text through console and then returns that message as a response." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import asyncio\n", + "from typing import List, Sequence\n", + "\n", + "from autogen_agentchat.agents import BaseChatAgent\n", + "from autogen_agentchat.base import Response\n", + "from autogen_agentchat.messages import ChatMessage\n", + "from autogen_core.base import CancellationToken\n", + "\n", + "\n", + "class UserProxyAgent(BaseChatAgent):\n", + " def __init__(self, name: str) -> None:\n", + " super().__init__(name, \"A human user.\")\n", + "\n", + " @property\n", + " def produced_message_types(self) -> List[type[ChatMessage]]:\n", + " return [TextMessage]\n", + "\n", + " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", + " user_input = await asyncio.get_event_loop().run_in_executor(None, input, \"Enter your response: \")\n", + " return Response(chat_message=TextMessage(content=user_input, source=self.name))\n", + "\n", + " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", + " pass\n", + "\n", + "\n", + "async def run_user_proxy_agent() -> None:\n", + " user_proxy_agent = UserProxyAgent(name=\"user_proxy_agent\")\n", + " response = await user_proxy_agent.on_messages([], CancellationToken())\n", + " print(response.chat_message.content)\n", + "\n", + "\n", + "# Use asyncio.run(run_user_proxy_agent()) when running in a script.\n", + "await run_user_proxy_agent()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md index d8a4d8559bb4..fa0ed5f6caa3 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/index.md @@ -46,6 +46,12 @@ A smart team that uses a model-based strategy and custom selector. A dynamic team that uses handoffs to pass tasks between agents. ::: +:::{grid-item-card} {fas}`users;pst-color-primary` Custom Agents +:link: ./custom-agents.html + +How to build custom agents. 
+::: + :::: ```{toctree} @@ -58,4 +64,5 @@ teams selector-group-chat swarm termination +custom-agents ``` diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb index 80c9c848273c..f44a187609ff 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/models.ipynb @@ -1,181 +1,187 @@ { - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "# Models\n", - "\n", - "In many cases, agents need access to model services such as OpenAI, Azure OpenAI, and local models.\n", - "AgentChat utilizes model clients provided by the\n", - "[`autogen-ext`](../../core-user-guide/framework/model-clients.ipynb) package." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## OpenAI\n", - "\n", - "To access OpenAI models, you need to install the `openai` extension to use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "pip install 'autogen-ext[openai]==0.4.0.dev6'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "You will also need to obtain an [API key](https://platform.openai.com/account/api-keys) from OpenAI." - ] - }, - { - "cell_type": "code", - "execution_count": 10, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_ext.models import OpenAIChatCompletionClient\n", - "\n", - "opneai_model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", - " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY environment variable set.\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To test the model client, you can use the following code:" - ] - }, - { - "cell_type": "code", - "execution_count": 11, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "CreateResult(finish_reason='stop', content='The capital of France is Paris.', usage=RequestUsage(prompt_tokens=15, completion_tokens=7), cached=False, logprobs=None)\n" - ] - } - ], - "source": [ - "from autogen_core.components.models import UserMessage\n", - "\n", - "result = await opneai_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n", - "print(result)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Azure OpenAI\n", - "\n", - "Install the `azure` and `openai` extensions to use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`." 
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "vscode": { - "languageId": "shellscript" - } - }, - "outputs": [], - "source": [ - "pip install 'autogen-ext[openai,azure]==0.4.0.dev6'" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "To use the client, you need to provide your deployment id, Azure Cognitive Services endpoint, api version, and model capabilities.\n", - "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n", - "\n", - "The following code snippet shows how to use AAD authentication.\n", - "The identity used must be assigned the [Cognitive Services OpenAI User](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", - "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", - "\n", - "# Create the token provider\n", - "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", - "\n", - "az_model_client = AzureOpenAIChatCompletionClient(\n", - " model=\"{your-azure-deployment}\",\n", - " api_version=\"2024-06-01\",\n", - " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", - " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", - " # api_key=\"sk-...\", # For key-based authentication.\n", - " model_capabilities={\n", - " \"vision\": True,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info." - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Local Models\n", - "\n", - "We are working on it. Stay tuned!" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": ".venv", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.11.5" - } - }, - "nbformat": 4, - "nbformat_minor": 2 + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Models\n", + "\n", + "In many cases, agents need access to model services such as OpenAI, Azure OpenAI, and local models.\n", + "AgentChat utilizes model clients provided by the\n", + "[`autogen-ext`](../../core-user-guide/framework/model-clients.ipynb) package." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## OpenAI\n", + "\n", + "To access OpenAI models, you need to install the `openai` extension to use the {py:class}`~autogen_ext.models.OpenAIChatCompletionClient`." 
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "shellscript"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "pip install 'autogen-ext[openai]==0.4.0.dev7'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You will also need to obtain an [API key](https://platform.openai.com/account/api-keys) from OpenAI."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from autogen_ext.models import OpenAIChatCompletionClient\n",
+    "\n",
+    "openai_model_client = OpenAIChatCompletionClient(\n",
+    "    model=\"gpt-4o-2024-08-06\",\n",
+    "    # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY environment variable set.\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To test the model client, you can use the following code:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "CreateResult(finish_reason='stop', content='The capital of France is Paris.', usage=RequestUsage(prompt_tokens=15, completion_tokens=7), cached=False, logprobs=None)\n"
+     ]
+    }
+   ],
+   "source": [
+    "from autogen_core.components.models import UserMessage\n",
+    "\n",
+    "result = await openai_model_client.create([UserMessage(content=\"What is the capital of France?\", source=\"user\")])\n",
+    "print(result)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "```{note}\n",
+    "You can use this client with models hosted on OpenAI-compatible endpoints; however, we have not tested this functionality.\n",
+    "See {py:class}`~autogen_ext.models.OpenAIChatCompletionClient` for more information.\n",
+    "```"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Azure OpenAI\n",
+    "\n",
+    "Install the `azure` and `openai` extensions to use the {py:class}`~autogen_ext.models.AzureOpenAIChatCompletionClient`."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "vscode": {
+     "languageId": "shellscript"
+    }
+   },
+   "outputs": [],
+   "source": [
+    "pip install 'autogen-ext[openai,azure]==0.4.0.dev7'"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To use the client, you need to provide your deployment name, Azure Cognitive Services endpoint, API version, and the underlying model name.\n",
+    "For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential.\n",
+    "\n",
+    "The following code snippet shows how to use AAD authentication.\n",
+    "The identity used must be assigned the [Cognitive Services OpenAI User](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/role-based-access-control#cognitive-services-openai-user) role."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from autogen_ext.models import AzureOpenAIChatCompletionClient\n", + "from azure.identity import DefaultAzureCredential, get_bearer_token_provider\n", + "\n", + "# Create the token provider\n", + "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", + "\n", + "az_model_client = AzureOpenAIChatCompletionClient(\n", + " azure_deployment=\"{your-azure-deployment}\",\n", + " model=\"{model-name, such as gpt-4o}\",\n", + " api_version=\"2024-06-01\",\n", + " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", + " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", + " # api_key=\"sk-...\", # For key-based authentication.\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "See [here](https://learn.microsoft.com/en-us/azure/ai-services/openai/how-to/managed-identity#chat-completions) for how to use the Azure client directly or for more info." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Local Models\n", + "\n", + "We are working on it. Stay tuned!" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": ".venv", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.5" + } + }, + "nbformat": 4, + "nbformat_minor": 2 } diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb index 0a3bb0fe3135..3377d3d47c34 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.ipynb @@ -11,46 +11,61 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "The `SelectorGroupChat` implements a team coordination pattern where participants take turns publishing messages, with the next speaker selected by a generative model (LLM) based on the conversation context. This enables dynamic and context-aware multi-agent conversations.\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` implements a team where participants take turns broadcasting messages to all other participants,\n", + "with the next speaker selected by a generative model (e.g., an LLM) based on the shared context. 
\n", + "This enables dynamic and context-aware multi-agent collaboration.\n", "\n", - "\n", - "`SelectorGroupChat` provides several key features:\n", - "- Dynamic speaker selection using an LLM to analyze conversation context\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` provides several key features:\n", + "- Model-based speaker selection\n", "- Configurable participant roles and descriptions\n", "- Optional prevention of consecutive turns by the same speaker\n", "- Customizable selection prompting\n", + "- Customizable selection function to override the default model-based selection\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a high-level API.\n", + "If you need more control and customization that is not supported by this API,\n", + "you can take a look at the [Group Chat Pattern](../../core-user-guide/design-patterns/group-chat.ipynb)\n", + "in the Core API documentation and implement your own group chat logic.\n", + "```\n", + "\n", + "## How does it work?\n", "\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat` is a group chat similar to {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "but with a model-based next speaker selection mechanism.\n", + "When the team receives a task through {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run` or {py:meth}`~autogen_agentchat.teams.BaseGroupChat.run_stream`,\n", + "the following steps are executed:\n", "\n", - "### Speaker Selection Process\n", + "1. The team analyzes the current conversation context, including the conversation history and participants' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes, to determine the next speaker using a model. You can override the model by providing a custom selection function.\n", + "2. The team prompts the selected speaker agent to provide a response, which is then **broadcasted** to all other participants.\n", + "3. The termination condition is checked to determine if the conversation should end, if not, the process repeats from step 1.\n", + "4. When the conversation ends, the team returns the {py:class}`~autogen_agentchat.base.TaskResult` containing the conversation history from this task.\n", "\n", - "The chat uses an LLM to select the next speaker by:\n", - "1. Analyzing the conversation history\n", - "2. Evaluating participant roles and descriptions\n", - "3. Using a configurable prompt template to make the selection\n", - "4. Validating that exactly one participant is selected\n", - "\n" + "Once the team finishes the task, the conversation context is kept within the team and all participants, so the next task can continue from the previous conversation context.\n", + "You can reset the conversation context by calling {py:meth}`~autogen_agentchat.teams.BaseGroupChat.reset`.\n", + "\n", + "In this section, we will demonstrate how to use {py:class}`~autogen_agentchat.teams.SelectorGroupChat` with a simple example for a web search and data analysis task." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Web Search and Analysis Example" ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], "source": [ - "import asyncio\n", - "from typing import List, Sequence\n", + "from typing import Sequence\n", "\n", - "from autogen_agentchat.agents import (\n", - " BaseChatAgent,\n", - " CodingAssistantAgent,\n", - " ToolUseAssistantAgent,\n", - ")\n", - "from autogen_agentchat.base import Response\n", - "from autogen_agentchat.messages import ChatMessage, StopMessage, TextMessage\n", - "from autogen_agentchat.task import TextMentionTermination\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import AgentMessage\n", + "from autogen_agentchat.task import Console, MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import SelectorGroupChat\n", - "from autogen_core.base import CancellationToken\n", - "from autogen_core.components.tools import FunctionTool\n", "from autogen_ext.models import OpenAIChatCompletionClient" ] }, @@ -58,61 +73,167 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Defining Agents\n", - "The `UserProxyAgent` allows the user to input messages directly. This agent waits for user input and returns a text message or a stop message if the user decides to terminate the conversation." + "### Agents\n", + "\n", + "![Selector Group Chat](selector-group-chat.svg)\n", + "\n", + "This system uses three specialized agents:\n", + "\n", + "- **Planning Agent**: The strategic coordinator that breaks down complex tasks into manageable subtasks. \n", + "- **Web Search Agent**: An information retrieval specialist that interfaces with the `search_web_tool`.\n", + "- **Data Analyst Agent**: A calculation specialist equipped with the `percentage_change_tool`. " + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The tools `search_web_tool` and `percentage_change_tool` are external tools that the agents can use to perform their tasks."
] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "class UserProxyAgent(BaseChatAgent):\n", - " def __init__(self, name: str) -> None:\n", - " super().__init__(name, \"A human user.\")\n", - "\n", - " @property\n", - " def produced_message_types(self) -> List[type[ChatMessage]]:\n", - " return [TextMessage, StopMessage]\n", - "\n", - " async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response:\n", - " user_input = await asyncio.get_event_loop().run_in_executor(None, input, \"Enter your response: \")\n", - " if \"TERMINATE\" in user_input:\n", - " return Response(chat_message=StopMessage(content=\"User has terminated the conversation.\", source=self.name))\n", - " return Response(chat_message=TextMessage(content=user_input, source=self.name))\n", - "\n", - " async def on_reset(self, cancellation_token: CancellationToken) -> None:\n", - " pass" + "# Note: This example uses mock tools instead of real APIs for demonstration purposes\n", + "def search_web_tool(query: str) -> str:\n", + " if \"2006-2007\" in query:\n", + " return \"\"\"Here are the total points scored by Miami Heat players in the 2006-2007 season:\n", + " Udonis Haslem: 844 points\n", + " Dwayne Wade: 1397 points\n", + " James Posey: 550 points\n", + " ...\n", + " \"\"\"\n", + " elif \"2007-2008\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.\"\n", + " elif \"2008-2009\" in query:\n", + " return \"The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.\"\n", + " return \"No data found.\"\n", + "\n", + "\n", + "def percentage_change_tool(start: float, end: float) -> float:\n", + " return ((end - start) / start) * 100" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's create the specialized agents using the {py:class}`~autogen_agentchat.agents.AssistantAgent` class.\n", + "It is important to note that the agents' {py:attr}`~autogen_agentchat.base.ChatAgent.name` and {py:attr}`~autogen_agentchat.base.ChatAgent.description` attributes are used by the model to determine the next speaker,\n", + "so it is recommended to provide meaningful names and descriptions." 
] }, { "cell_type": "code", - "execution_count": 3, + "execution_count": 11, "metadata": {}, "outputs": [], "source": [ - "async def flight_search(start: str, destination: str, date: str) -> str:\n", - " return \"\\n\".join(\n", - " [\n", - " f\"AC24 from {start} to {destination} on {date} is $500\",\n", - " f\"UA23 from {start} to {destination} on {date} is $450\",\n", - " f\"AL21 from {start} to {destination} on {date} is $400\",\n", - " ]\n", - " )\n", - "\n", - "\n", - "async def flight_booking(flight: str, date: str) -> str:\n", - " return f\"Booked flight {flight} on {date}\"" + "model_client = OpenAIChatCompletionClient(model=\"gpt-4o\")\n", + "\n", + "planning_agent = AssistantAgent(\n", + " \"PlanningAgent\",\n", + " description=\"An agent for planning tasks, this agent should be the first to engage when given a new task.\",\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a planning agent.\n", + " Your job is to break down complex tasks into smaller, manageable subtasks.\n", + " Your team members are:\n", + " Web search agent: Searches for information\n", + " Data analyst: Performs calculations\n", + "\n", + " You only plan and delegate tasks - you do not execute them yourself.\n", + "\n", + " When assigning tasks, use this format:\n", + " 1. <agent> : <task>\n", + "\n", + " After all tasks are complete, summarize the findings and end with \"TERMINATE\".\n", + " \"\"\",\n", + ")\n", + "\n", + "web_search_agent = AssistantAgent(\n", + " \"WebSearchAgent\",\n", + " description=\"A web search agent.\",\n", + " tools=[search_web_tool],\n", + " model_client=model_client,\n", + " system_message=\"\"\"\n", + " You are a web search agent.\n", + " Your only tool is search_web_tool - use it to find information.\n", + " You make only one search call at a time.\n", + " Once you have the results, you never do calculations based on them.\n", + " \"\"\",\n", + ")\n", + "\n", + "data_analyst_agent = AssistantAgent(\n", + " \"DataAnalystAgent\",\n", + " description=\"A data analyst agent. Useful for performing calculations.\",\n", + " model_client=model_client,\n", + " tools=[percentage_change_tool],\n", + " system_message=\"\"\"\n", + " You are a data analyst.\n", + " Given the tasks you have been assigned, you should analyze the data and provide results using the tools provided.\n", + " \"\"\",\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Workflow\n", + "\n", + "1. The task is received by the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` which, based on agent descriptions, selects the most appropriate agent to handle the initial task (typically the Planning Agent).\n", + "\n", + "2. The **Planning Agent** analyzes the task and breaks it down into subtasks, assigning each to the most appropriate agent using the format:\n", + "   `<agent> : <task>`\n", + "\n", + "3. Based on the conversation context and agent descriptions, the {py:class}`~autogen_agentchat.teams.SelectorGroupChat` manager dynamically selects the next agent to handle their assigned subtask.\n", + "\n", + "4. The **Web Search Agent** performs searches one at a time, storing results in the shared conversation history.\n", + "\n", + "5. The **Data Analyst** processes the gathered information using available calculation tools when selected.\n", + "\n", + "6.
The workflow continues with agents being dynamically selected until either:\n", + "   - The Planning Agent determines all subtasks are complete and sends \"TERMINATE\"\n", + "   - An alternative termination condition is met (e.g., a maximum number of messages)\n", + "\n", + "When defining your agents, make sure to include a helpful {py:attr}`~autogen_agentchat.base.ChatAgent.description` since this is used to decide which agent to select next." ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The `ToolUseAssistantAgent` is responsible for calling external tools. In this example, two tools are defined: `flight_search` and `flight_booking`.\n", + "Let's create the team with two termination conditions:\n", + "{py:class}`~autogen_agentchat.task.TextMentionTermination` to end the conversation when the Planning Agent sends \"TERMINATE\",\n", + "and {py:class}`~autogen_agentchat.task.MaxMessageTermination` to limit the conversation to 25 messages to avoid an infinite loop." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "text_mention_termination = TextMentionTermination(\"TERMINATE\")\n", + "max_messages_termination = MaxMessageTermination(max_messages=25)\n", + "termination = text_mention_termination | max_messages_termination\n", "\n", - "Additionally, the `CodingAssistantAgent` serves as a general travel assistant with predefined behavior specified in the `system_message`." + "team = SelectorGroupChat(\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", + " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", + " termination_condition=termination,\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Now we run the team with a task to find information about an NBA player." ] }, { @@ -124,144 +245,200 @@ "name": "stdout", "output_type": "stream", "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To answer your question, we need to separate this task into several subtasks:\n", "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:30.283450]:\u001b[0m\n", "\n", - "Help user plan a trip and book a flight."
+ "Let's start with these tasks.\n", + "[Prompt tokens: 159, Completion tokens: 130]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_js7ogBp0UDmHfvLo6BmWFpM1', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 279, Completion tokens: 26]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_js7ogBp0UDmHfvLo6BmWFpM1')]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionCall(id='call_G7ATvIq0rSjc8fqLdKQ5uWI4', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Xzw9bAvgfo40EjILophG5pnl', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]\n", + "[Prompt tokens: 371, Completion tokens: 70]\n", + "---------- WebSearchAgent ----------\n", + "[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_G7ATvIq0rSjc8fqLdKQ5uWI4'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Xzw9bAvgfo40EjILophG5pnl')]\n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\n", + "\n", + "Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. To calculate the percentage change:\n", + "\n", + "Percentage Change = \\(\\frac{(398 - 214)}{214} \\times 100\\)\n", + "\n", + "Now, a data analyst would calculate the actual percentage change based on these numbers.\n", + "[Prompt tokens: 506, Completion tokens: 107]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_76VkQ2nnKrwtuI1dmjLQ7G5P', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 383, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_76VkQ2nnKrwtuI1dmjLQ7G5P')]\n", + "---------- DataAnalystAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. His total rebounds increased by approximately 85.98% between the 2007-2008 and 2008-2009 seasons.\n", + "[Prompt tokens: 424, Completion tokens: 52]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, scoring 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased by approximately 85.98%. 
\n", + "\n", + "TERMINATE\n", + "[Prompt tokens: 470, Completion tokens: 66]\n", + "---------- Summary ----------\n", + "Number of messages: 11\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 2592\n", + "Total completion tokens: 471\n", + "Duration: 11.95 seconds\n" ] }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=159, completion_tokens=130), content=\"To answer your question, we need to separate this task into several subtasks:\\n\\n1. Web search agent: Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Web search agent: Find the total rebounds for that player in the 2007-2008 NBA season.\\n3. Web search agent: Find the total rebounds for that player in the 2008-2009 NBA season.\\n4. Data analyst: Calculate the percentage change in the player's total rebounds between the 2007-2008 and 2008-2009 seasons.\\n\\nLet's start with these tasks.\"), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=26), content=[FunctionCall(id='call_js7ogBp0UDmHfvLo6BmWFpM1', arguments='{\"query\":\"Miami Heat player highest points 2006-2007 season\"}', name='search_web_tool')]), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='Here are the total points scored by Miami Heat players in the 2006-2007 season:\\n Udonis Haslem: 844 points\\n Dwayne Wade: 1397 points\\n James Posey: 550 points\\n ...\\n ', call_id='call_js7ogBp0UDmHfvLo6BmWFpM1')]), ToolCallMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=371, completion_tokens=70), content=[FunctionCall(id='call_G7ATvIq0rSjc8fqLdKQ5uWI4', arguments='{\"query\": \"Dwyane Wade total rebounds 2007-2008 season\"}', name='search_web_tool'), FunctionCall(id='call_Xzw9bAvgfo40EjILophG5pnl', arguments='{\"query\": \"Dwyane Wade total rebounds 2008-2009 season\"}', name='search_web_tool')]), ToolCallResultMessage(source='WebSearchAgent', models_usage=None, content=[FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2007-2008 is 214.', call_id='call_G7ATvIq0rSjc8fqLdKQ5uWI4'), FunctionExecutionResult(content='The number of total rebounds for Dwayne Wade in the Miami Heat season 2008-2009 is 398.', call_id='call_Xzw9bAvgfo40EjILophG5pnl')]), TextMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=506, completion_tokens=107), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with 1,397 points.\\n\\nBetween the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased from 214 to 398. 
To calculate the percentage change:\\n\\nPercentage Change = \\\\(\\\\frac{(398 - 214)}{214} \\\\times 100\\\\)\\n\\nNow, a data analyst would calculate the actual percentage change based on these numbers.\"), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=383, completion_tokens=20), content=[FunctionCall(id='call_76VkQ2nnKrwtuI1dmjLQ7G5P', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_76VkQ2nnKrwtuI1dmjLQ7G5P')]), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=424, completion_tokens=52), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. His total rebounds increased by approximately 85.98% between the 2007-2008 and 2008-2009 seasons.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=470, completion_tokens=66), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, scoring 1,397 points. Between the 2007-2008 and 2008-2009 seasons, Dwyane Wade's total rebounds increased by approximately 85.98%. \\n\\nTERMINATE\")], stop_reason=\"Text 'TERMINATE' mentioned\")" + ] + }, + "execution_count": 12, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "task = \"Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\"\n", + "\n", + "# Use asyncio.run(...) if you are running this in a script.\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As we can see, after the Web Search Agent conducts the necessary searches and the Data Analyst Agent completes the necessary calculations, we find that Dwayne Wade was the Miami Heat player with the highest points in the 2006-2007 season, and the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons is 85.98%!" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Custom Selector Function" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Often times we want better control over the selection process. 
\n", + "To this end, we can set the `selector_func` argument with a custom selector function to override the default model-based selection.\n", + "For instance, we want the Planning Agent to speak immediately after any specialized agent to check the progress.\n", + "\n", + "```{note}\n", + "Returning `None` from the custom selector function will use the default model-based selection.\n", + "```" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ + "---------- user ----------\n", + "Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?\n", + "---------- PlanningAgent ----------\n", + "To solve this inquiry, let's break it down into smaller tasks again:\n", "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:48.275743], User:\u001b[0m\n", - "\n", - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:50.795496], TravelAssistant:\u001b[0m\n", - "\n", - "I'd be happy to help you plan your trip! To get started, could you please provide me with the following details:\n", - "\n", - "1. Your departure city and the destination city.\n", - "2. Your travel dates (departure and return).\n", - "3. The number of travelers and their ages (if any children are involved).\n", - "4. Your budget for flights and accommodations, if you have one in mind.\n", - "5. Any specific activities or attractions you're interested in at the destination.\n", - "\n", - "Once I have this information, I can help you find the best options!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:35:59.701486], User:\u001b[0m\n", - "\n", - "Traveling to toronto from new york\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:02.325330], TravelAssistant:\u001b[0m\n", - "\n", - "Great choice! Toronto is a vibrant city with a lot to offer. Now, could you please provide the following additional details to help me assist you better?\n", - "\n", - "1. What are your travel dates (departure and return)?\n", - "2. How many travelers will be going, and what are their ages?\n", - "3. Do you have a budget for the flight and accommodations?\n", - "4. Are there any specific activities or attractions you’re interested in while in Toronto?\n", - "\n", - "Once I have this information, I can help you find the best flights and suggestions for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:20.633004], User:\u001b[0m\n", - "\n", - "leaving on december 7 and returning on 12\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:23.202871], TravelAssistant:\u001b[0m\n", - "\n", - "Thank you for the details! Here's what I have so far:\n", - "\n", - "- **Departure City:** New York\n", - "- **Destination City:** Toronto\n", - "- **Departure Date:** December 7\n", - "- **Return Date:** December 12\n", + "1. Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\n", + "2. Find that player's total rebounds for the 2007-2008 NBA season.\n", + "3. 
Find that player's total rebounds for the 2008-2009 NBA season.\n", + "4. Calculate the percentage change in the player's total rebounds from the 2007-2008 to the 2008-2009 season.\n", "\n", - "Now, could you please provide:\n", + "Let's proceed with these tasks and find the necessary information.\n", + "[Prompt tokens: 595, Completion tokens: 115]\n", + "---------- WebSearchAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, who scored a total of 1,397 points.\n", "\n", - "1. The number of travelers and their ages.\n", - "2. Your budget for flights and accommodations (if applicable).\n", - "3. Any specific activities or attractions you're interested in while in Toronto.\n", + "In terms of his rebound statistics:\n", + "- In the 2007-2008 season, Dwyane Wade recorded 214 total rebounds.\n", + "- In the 2008-2009 season, he recorded 398 total rebounds.\n", "\n", - "This will help me provide more tailored options for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:38.096554], User:\u001b[0m\n", + "To find the percentage change in his total rebounds, a data analyst would perform the following calculation:\n", "\n", - "just myself one adult\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:40.307824], FlightBroker:\u001b[0m\n", + "\\[\n", + "\\text{Percentage Change} = \\left( \\frac{398 - 214}{214} \\right) \\times 100\n", + "\\]\n", "\n", - "Thanks for the information! Here's what I have:\n", + "A data analyst would use the above numbers to determine the percentage change in his total rebounds between these two seasons.\n", + "[Prompt tokens: 794, Completion tokens: 154]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\n", "\n", - "- **Departure City:** New York\n", - "- **Destination City:** Toronto\n", - "- **Departure Date:** December 7\n", - "- **Return Date:** December 12\n", - "- **Number of Travelers:** 1 Adult\n", + "Let's have a data analyst calculate the percentage change: \n", "\n", - "Could you let me know if you have a budget for flights and accommodations? Additionally, are there any specific activities or attractions you're interested in while in Toronto? This will help me provide the best options for your trip!\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:45.875280], User:\u001b[0m\n", + "1. Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons using the formula provided by the Web search agent.\n", + "[Prompt tokens: 878, Completion tokens: 116]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionCall(id='call_Fh84DXp5MxFzutmKVvclw5Cz', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]\n", + "[Prompt tokens: 942, Completion tokens: 20]\n", + "---------- DataAnalystAgent ----------\n", + "[FunctionExecutionResult(content='85.98130841121495', call_id='call_Fh84DXp5MxFzutmKVvclw5Cz')]\n", + "---------- DataAnalystAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. 
The percentage change in his total rebounds between the 2007-2008 season and the 2008-2009 season was approximately 85.98%.\n", + "[Prompt tokens: 983, Completion tokens: 56]\n", + "---------- PlanningAgent ----------\n", + "The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, his total rebounds increased by approximately 85.98%. \n", "\n", - "that's it\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-08T20:36:50.925624], FlightBroker:\u001b[0m\n", - "\n", - "Your flights have been successfully booked! Here are the details:\n", - "\n", - "- **Departure:** New York to Toronto\n", - " - **Flight:** AL21\n", - " - **Date:** December 7, 2023\n", - "\n", - "- **Return:** Toronto to New York\n", - " - **Flight:** AL21\n", - " - **Date:** December 12, 2023\n", - "\n", - "If you need help with accommodations, activities, or anything else for your trip, feel free to let me know! \n", - "\n", - "TERMINATE" + "TERMINATE\n", + "[Prompt tokens: 1065, Completion tokens: 65]\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 5257\n", + "Total completion tokens: 526\n", + "Duration: 11.98 seconds\n" ] }, { "data": { "text/plain": [ - "TeamRunResult(messages=[TextMessage(source='user', content='Help user plan a trip and book a flight.'), TextMessage(source='User', content=''), TextMessage(source='TravelAssistant', content=\"I'd be happy to help you plan your trip! To get started, could you please provide me with the following details:\\n\\n1. Your departure city and the destination city.\\n2. Your travel dates (departure and return).\\n3. The number of travelers and their ages (if any children are involved).\\n4. Your budget for flights and accommodations, if you have one in mind.\\n5. Any specific activities or attractions you're interested in at the destination.\\n\\nOnce I have this information, I can help you find the best options!\"), TextMessage(source='User', content='Traveling to toronto from new york'), TextMessage(source='TravelAssistant', content='Great choice! Toronto is a vibrant city with a lot to offer. Now, could you please provide the following additional details to help me assist you better?\\n\\n1. What are your travel dates (departure and return)?\\n2. How many travelers will be going, and what are their ages?\\n3. Do you have a budget for the flight and accommodations?\\n4. Are there any specific activities or attractions you’re interested in while in Toronto?\\n\\nOnce I have this information, I can help you find the best flights and suggestions for your trip!'), TextMessage(source='User', content='leaving on december 7 and returning on 12'), TextMessage(source='TravelAssistant', content=\"Thank you for the details! Here's what I have so far:\\n\\n- **Departure City:** New York\\n- **Destination City:** Toronto\\n- **Departure Date:** December 7\\n- **Return Date:** December 12\\n\\nNow, could you please provide:\\n\\n1. The number of travelers and their ages.\\n2. Your budget for flights and accommodations (if applicable).\\n3. Any specific activities or attractions you're interested in while in Toronto.\\n\\nThis will help me provide more tailored options for your trip!\"), TextMessage(source='User', content='just myself one adult'), TextMessage(source='FlightBroker', content=\"Thanks for the information! 
Here's what I have:\\n\\n- **Departure City:** New York\\n- **Destination City:** Toronto\\n- **Departure Date:** December 7\\n- **Return Date:** December 12\\n- **Number of Travelers:** 1 Adult\\n\\nCould you let me know if you have a budget for flights and accommodations? Additionally, are there any specific activities or attractions you're interested in while in Toronto? This will help me provide the best options for your trip!\"), TextMessage(source='User', content=\"that's it\"), StopMessage(source='FlightBroker', content='Your flights have been successfully booked! Here are the details:\\n\\n- **Departure:** New York to Toronto\\n - **Flight:** AL21\\n - **Date:** December 7, 2023\\n\\n- **Return:** Toronto to New York\\n - **Flight:** AL21\\n - **Date:** December 12, 2023\\n\\nIf you need help with accommodations, activities, or anything else for your trip, feel free to let me know! \\n\\nTERMINATE'), StopMessage(source='StopMessageTermination', content='Stop message received')])" + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Who was the Miami Heat player with the highest points in the 2006-2007 season, and what was the percentage change in his total rebounds between the 2007-2008 and 2008-2009 seasons?'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=595, completion_tokens=115), content=\"To solve this inquiry, let's break it down into smaller tasks again:\\n\\n1. Find out who was the Miami Heat player with the highest points in the 2006-2007 NBA season.\\n2. Find that player's total rebounds for the 2007-2008 NBA season.\\n3. Find that player's total rebounds for the 2008-2009 NBA season.\\n4. Calculate the percentage change in the player's total rebounds from the 2007-2008 to the 2008-2009 season.\\n\\nLet's proceed with these tasks and find the necessary information.\"), TextMessage(source='WebSearchAgent', models_usage=RequestUsage(prompt_tokens=794, completion_tokens=154), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, who scored a total of 1,397 points.\\n\\nIn terms of his rebound statistics:\\n- In the 2007-2008 season, Dwyane Wade recorded 214 total rebounds.\\n- In the 2008-2009 season, he recorded 398 total rebounds.\\n\\nTo find the percentage change in his total rebounds, a data analyst would perform the following calculation:\\n\\n\\\\[\\n\\\\text{Percentage Change} = \\\\left( \\\\frac{398 - 214}{214} \\\\right) \\\\times 100\\n\\\\]\\n\\nA data analyst would use the above numbers to determine the percentage change in his total rebounds between these two seasons.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=878, completion_tokens=116), content=\"The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. His total rebounds increased from 214 in the 2007-2008 season to 398 in the 2008-2009 season.\\n\\nLet's have a data analyst calculate the percentage change: \\n\\n1. 
Data analyst: Calculate the percentage change in Dwyane Wade's total rebounds between the 2007-2008 and 2008-2009 seasons using the formula provided by the Web search agent.\"), ToolCallMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=942, completion_tokens=20), content=[FunctionCall(id='call_Fh84DXp5MxFzutmKVvclw5Cz', arguments='{\"start\":214,\"end\":398}', name='percentage_change_tool')]), ToolCallResultMessage(source='DataAnalystAgent', models_usage=None, content=[FunctionExecutionResult(content='85.98130841121495', call_id='call_Fh84DXp5MxFzutmKVvclw5Cz')]), TextMessage(source='DataAnalystAgent', models_usage=RequestUsage(prompt_tokens=983, completion_tokens=56), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade. The percentage change in his total rebounds between the 2007-2008 season and the 2008-2009 season was approximately 85.98%.'), TextMessage(source='PlanningAgent', models_usage=RequestUsage(prompt_tokens=1065, completion_tokens=65), content='The Miami Heat player with the highest points in the 2006-2007 season was Dwyane Wade, with a total of 1,397 points. Between the 2007-2008 and 2008-2009 seasons, his total rebounds increased by approximately 85.98%. \\n\\nTERMINATE')], stop_reason=\"Text 'TERMINATE' mentioned\")" ] }, - "execution_count": 4, + "execution_count": 13, "metadata": {}, "output_type": "execute_result" } ], "source": [ - "user_proxy = UserProxyAgent(\"User\")\n", - "flight_broker = ToolUseAssistantAgent(\n", - " \"FlightBroker\",\n", - " description=\"An assistant for booking flights\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " registered_tools=[\n", - " FunctionTool(flight_search, description=\"Search for flights\"),\n", - " FunctionTool(flight_booking, description=\"Book a flight\"),\n", - " ],\n", - ")\n", - "travel_assistant = CodingAssistantAgent(\n", - " \"TravelAssistant\",\n", - " description=\"A travel assistant\",\n", - " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", - " system_message=\"You are a travel assistant.\",\n", - ")\n", + "def selector_func(messages: Sequence[AgentMessage]) -> str | None:\n", + " if messages[-1].source != planning_agent.name:\n", + " return planning_agent.name\n", + " return None\n", + "\n", "\n", - "termination = TextMentionTermination(\"TERMINATE\")\n", "team = SelectorGroupChat(\n", - " [user_proxy, flight_broker, travel_assistant],\n", + " [planning_agent, web_search_agent, data_analyst_agent],\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4o-mini\"),\n", " termination_condition=termination,\n", + " selector_func=selector_func,\n", ")\n", - "await team.run(task=\"Help user plan a trip and book a flight.\")" + "\n", + "await Console(team.run_stream(task=task))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You can see from the conversation log that the Planning Agent always speaks immediately after the specialized agents." 
] } ], @@ -281,7 +458,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg new file mode 100644 index 000000000000..4a4009992c4f --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/selector-group-chat.svg @@ -0,0 +1,3 @@ + + +
[selector-group-chat.svg: diagram of the SelectorGroupChat team. The Application/User sends a Task to the SelectorGroupChat; its Selector chooses among the Planning Agent, Web Search Agent, and Data Analyst Agent; a TaskResult is returned.]
\ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb index f6b93a0abc3f..68e77690d45b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm.ipynb @@ -4,13 +4,550 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "# Swarm" + "# Swarm\n", + "\n", + "{py:class}`~autogen_agentchat.teams.Swarm` implements a team in which agents can hand off \n", + "tasks to other agents based on their capabilities. \n", + "It is a multi-agent design pattern first introduced by OpenAI in \n", + "[an experimental project](https://github.com/openai/swarm).\n", + "The key idea is to let agents delegate tasks to other agents using a special tool call, while\n", + "all agents share the same message context.\n", + "This enables agents to make local decisions about task planning, rather than\n", + "relying on a central orchestrator such as in {py:class}`~autogen_agentchat.teams.SelectorGroupChat`.\n", + "\n", + "```{note}\n", + "{py:class}`~autogen_agentchat.teams.Swarm` is a high-level API. If you need more\n", + "control and customization that is not supported by this API, you can take a look\n", + "at the [Handoff Pattern](../../core-user-guide/design-patterns/handoffs.ipynb)\n", + "in the Core API documentation and implement your own version of the Swarm pattern.\n", + "```\n", + "\n", + "## How Does It Work?\n", + "\n", + "At its core, the {py:class}`~autogen_agentchat.teams.Swarm` team is a group chat\n", + "where agents take turns generating a response. \n", + "Similar to {py:class}`~autogen_agentchat.teams.SelectorGroupChat`\n", + "and {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`, participant agents\n", + "broadcast their responses so all agents share the same message context.\n", + "\n", + "Different from the other two group chat teams, at each turn,\n", + "**the speaker agent is selected based on the most recent\n", + "{py:class}`~autogen_agentchat.messages.HandoffMessage` message in the context.**\n", + "This naturally requires each agent in the team to be able to generate\n", + "{py:class}`~autogen_agentchat.messages.HandoffMessage` to signal\n", + "which other agents it hands off to.\n", + "\n", + "For {py:class}`~autogen_agentchat.agents.AssistantAgent`, you can set the\n", + "`handoffs` argument to specify which agents it can hand off to. You can\n", + "use {py:class}`~autogen_agentchat.agents.Handoff` to customize the message\n", + "content and handoff behavior.\n", + "\n", + "The overall process can be summarized as follows:\n", + "\n", + "1. Each agent has the ability to generate {py:class}`~autogen_agentchat.messages.HandoffMessage`\n", + "   to signal which other agents it can hand off to. For {py:class}`~autogen_agentchat.agents.AssistantAgent`, this means setting the `handoffs` argument.\n", + "2. When the team starts on a task, the first speaker agent operates on the task and makes a localized decision about whether to hand off and to whom.\n", + "3. When an agent generates a {py:class}`~autogen_agentchat.messages.HandoffMessage`, the receiving agent takes over the task with the same message context.\n", + "4.
The process continues until a termination condition is met.\n", + "\n", + "In this section, we will show you two examples of how to use the {py:class}`~autogen_agentchat.teams.Swarm` team:\n", + "\n", + "1. A customer support team with human-in-the-loop handoff.\n", + "2. An autonomous team for stock research." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Customer Support Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Customer Support](swarm_customer_support.svg)\n", + "\n", + "This system implements a flight refund scenario with two agents:\n", + "\n", + "- **Travel Agent**: Handles general travel and refund coordination.\n", + "- **Flights Refunder**: Specializes in processing flight refunds with the `refund_flight` tool.\n", + "\n", + "Additionally, we let the user interact with the agents when they hand off to `\"user\"`.\n", + "\n", + "#### Workflow\n", + "1. The **Travel Agent** initiates the conversation and evaluates the user's request.\n", + "2. Based on the request:\n", + "   - For refund-related tasks, the Travel Agent hands off to the **Flights Refunder**.\n", + "   - For information needed from the customer, either agent can hand off to the `\"user\"`.\n", + "3. The **Flights Refunder** processes refunds using the `refund_flight` tool when appropriate.\n", + "4. If an agent hands off to the `\"user\"`, the team execution will stop and wait for the user to input a response.\n", + "5. When the user provides input, it's sent back to the team as a {py:class}`~autogen_agentchat.messages.HandoffMessage`. This message is directed to the agent that originally requested user input.\n", + "6. The process continues until the Travel Agent determines the task is complete and terminates the workflow."
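A handoff target can also be declared with the {py:class}`~autogen_agentchat.agents.Handoff` class instead of a plain string, which lets you customize the handoff message. A minimal sketch, assuming the `target` and `message` field names from this release's API (check the `Handoff` reference for the exact fields):

```python
from autogen_agentchat.agents import AssistantAgent, Handoff

# Sketch: customize the content recorded when handing off to the user.
# The Handoff field names (target, message) are assumed from this release;
# model_client is defined as in the cells below.
travel_agent = AssistantAgent(
    "travel_agent",
    model_client=model_client,
    handoffs=[Handoff(target="user", message="Transferring to the user for more information.")],
    system_message="You are a travel agent.",
)
```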
+ ] + }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "from typing import Any, Dict, List\n", + "\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.messages import HandoffMessage\n", + "from autogen_agentchat.task import Console, HandoffTermination, TextMentionTermination\n", + "from autogen_agentchat.teams import Swarm\n", + "from autogen_ext.models import OpenAIChatCompletionClient" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tools" + ] + }, + { + "cell_type": "code", + "execution_count": 3, + "metadata": {}, + "outputs": [], + "source": [ + "def refund_flight(flight_id: str) -> str:\n", + " \"\"\"Refund a flight\"\"\"\n", + " return f\"Flight {flight_id} refunded\"" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Agents" + ] + }, + { + "cell_type": "code", + "execution_count": 4, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "\n", + "travel_agent = AssistantAgent(\n", + " \"travel_agent\",\n", + " model_client=model_client,\n", + " handoffs=[\"flights_refunder\", \"user\"],\n", + " system_message=\"\"\"You are a travel agent.\n", + " The flights_refunder is in charge of refunding flights.\n", + " If you need information from the user, you must first send your message, then you can handoff to the user.\n", + " Use TERMINATE when the travel planning is complete.\"\"\",\n", + ")\n", + "\n", + "flights_refunder = AssistantAgent(\n", + " \"flights_refunder\",\n", + " model_client=model_client,\n", + " handoffs=[\"travel_agent\", \"user\"],\n", + " tools=[refund_flight],\n", + " system_message=\"\"\"You are an agent specialized in refunding flights.\n", + " You only need flight reference numbers to refund a flight.\n", + " You have the ability to refund a flight using the refund_flight tool.\n", + " If you need information from the user, you must first send your message, then you can handoff to the user.\n", + " When the transaction is complete, handoff to the travel agent to finalize.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 5, + "metadata": {}, + "outputs": [], + "source": [ + "termination = HandoffTermination(target=\"user\") | TextMentionTermination(\"TERMINATE\")\n", + "team = Swarm([travel_agent, flights_refunder], termination_condition=termination)" + ] + }, + { + "cell_type": "code", + "execution_count": 7, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user ----------\n", + "I need to refund my flight.\n", + "---------- travel_agent ----------\n", + "[FunctionCall(id='call_epnozsBbe9i4swPaaBIR4Enl', arguments='{}', name='transfer_to_flights_refunder')]\n", + "[Prompt tokens: 327, Completion tokens: 14]\n", + "---------- travel_agent ----------\n", + "[FunctionExecutionResult(content='Transferred to flights_refunder, adopting the role of flights_refunder immediately.', call_id='call_epnozsBbe9i4swPaaBIR4Enl')]\n", + "---------- travel_agent ----------\n", + "Transferred to flights_refunder, adopting the role of flights_refunder immediately.\n", + "---------- flights_refunder ----------\n", + "I can help you with that. 
Could you please provide me with your flight reference number so I can process the refund?\n", + "[Prompt tokens: 450, Completion tokens: 25]\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_giMQVbQ7mXahC5G3eC0wvnCv', arguments='{}', name='transfer_to_user')]\n", + "[Prompt tokens: 483, Completion tokens: 11]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Transferred to user, adopting the role of user immediately.', call_id='call_giMQVbQ7mXahC5G3eC0wvnCv')]\n", + "---------- flights_refunder ----------\n", + "Transferred to user, adopting the role of user immediately.\n", + "---------- Summary ----------\n", + "Number of messages: 8\n", + "Finish reason: Handoff to user from flights_refunder detected.\n", + "Total prompt tokens: 1260\n", + "Total completion tokens: 50\n", + "Duration: 1.79 seconds\n", + "---------- user ----------\n", + "Sure, it's 507811\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_ACcFykJ3fPzanMwy1YGxG4L4', arguments='{\"flight_id\":\"507811\"}', name='refund_flight')]\n", + "[Prompt tokens: 530, Completion tokens: 18]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Flight 507811 refunded', call_id='call_ACcFykJ3fPzanMwy1YGxG4L4')]\n", + "---------- flights_refunder ----------\n", + "Your flight with the reference number 507811 has been successfully refunded. If there is anything else you need help with, feel free to ask!\n", + "[Prompt tokens: 488, Completion tokens: 30]\n", + "---------- flights_refunder ----------\n", + "[FunctionCall(id='call_9NjAP8yD1qgwNL4Zfntt4dVb', arguments='{}', name='transfer_to_travel_agent')]\n", + "[Prompt tokens: 605, Completion tokens: 13]\n", + "---------- flights_refunder ----------\n", + "[FunctionExecutionResult(content='Transferred to travel_agent, adopting the role of travel_agent immediately.', call_id='call_9NjAP8yD1qgwNL4Zfntt4dVb')]\n", + "---------- flights_refunder ----------\n", + "Transferred to travel_agent, adopting the role of travel_agent immediately.\n", + "---------- travel_agent ----------\n", + "If you need further assistance with travel planning or any other inquiries, just let me know. 
Have a wonderful day!\n", + "[Prompt tokens: 495, Completion tokens: 24]\n", + "---------- travel_agent ----------\n", + "TERMINATE\n", + "[Prompt tokens: 525, Completion tokens: 4]\n", + "---------- Summary ----------\n", + "Number of messages: 9\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 2643\n", + "Total completion tokens: 89\n", + "Duration: 6.63 seconds\n" + ] + } + ], + "source": [ + "task = \"I need to refund my flight.\"\n", + "\n", + "\n", + "async def run_team_stream() -> None:\n", + " task_result = await Console(team.run_stream(task=task))\n", + " last_message = task_result.messages[-1]\n", + "\n", + " while isinstance(last_message, HandoffMessage) and last_message.target == \"user\":\n", + " user_message = input(\"User: \")\n", + "\n", + " task_result = await Console(\n", + " team.run_stream(task=HandoffMessage(source=\"user\", target=last_message.source, content=user_message))\n", + " )\n", + " last_message = task_result.messages[-1]\n", + "\n", + "\n", + "await run_team_stream()" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Stock Research Example" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "![Stock Research](swarm_stock_research.svg)\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "This system is designed to perform stock research tasks by leveraging four agents:\n", + "\n", + "- **Planner**: The central coordinator that delegates specific tasks to specialized agents based on their expertise. The planner ensures that each agent is utilized efficiently and oversees the overall workflow.\n", + "- **Financial Analyst**: A specialized agent responsible for analyzing financial metrics and stock data using tools such as `get_stock_data`.\n", + "- **News Analyst**: An agent focused on gathering and summarizing recent news articles relevant to the stock, using tools such as `get_news`.\n", + "- **Writer**: An agent tasked with compiling the findings from the stock and news analysis into a cohesive final report.\n", + "\n", + "#### Workflow\n", + "1. The **Planner** initiates the research process by delegating tasks to the appropriate agents in a step-by-step manner.\n", + "2. Each agent performs its task independently and appends their work to the shared **message thread/history**. Rather than directly returning results to the planner, all agents contribute to and read from this shared message history. When agents generate their work using the LLM, they have access to this shared message history, which provides context and helps track the overall progress of the task.\n", + "3. Once an agent completes its task, it hands off control back to the planner.\n", + "4. The process continues until the planner determines that all necessary tasks have been completed and decides to terminate the workflow." 
+ ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Tools" + ] + }, + { + "cell_type": "code", + "execution_count": 16, + "metadata": {}, + "outputs": [], + "source": [ + "async def get_stock_data(symbol: str) -> Dict[str, Any]:\n", + " \"\"\"Get stock market data for a given symbol\"\"\"\n", + " return {\"price\": 180.25, \"volume\": 1000000, \"pe_ratio\": 65.4, \"market_cap\": \"700B\"}\n", + "\n", + "\n", + "async def get_news(query: str) -> List[Dict[str, str]]:\n", + " \"\"\"Get recent news articles about a company\"\"\"\n", + " return [\n", + " {\n", + " \"title\": \"Tesla Expands Cybertruck Production\",\n", + " \"date\": \"2024-03-20\",\n", + " \"summary\": \"Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\",\n", + " },\n", + " {\n", + " \"title\": \"Tesla FSD Beta Shows Promise\",\n", + " \"date\": \"2024-03-19\",\n", + " \"summary\": \"Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\",\n", + " },\n", + " {\n", + " \"title\": \"Model Y Dominates Global EV Sales\",\n", + " \"date\": \"2024-03-18\",\n", + " \"summary\": \"Tesla's Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\",\n", + " },\n", + " ]" + ] + }, + { + "cell_type": "code", + "execution_count": 17, + "metadata": {}, + "outputs": [], + "source": [ + "model_client = OpenAIChatCompletionClient(\n", + " model=\"gpt-4o\",\n", + " # api_key=\"YOUR_API_KEY\",\n", + ")\n", + "\n", + "planner = AssistantAgent(\n", + " \"planner\",\n", + " model_client=model_client,\n", + " handoffs=[\"financial_analyst\", \"news_analyst\", \"writer\"],\n", + " system_message=\"\"\"You are a research planning coordinator.\n", + " Coordinate market research by delegating to specialized agents:\n", + " - Financial Analyst: For stock data analysis\n", + " - News Analyst: For news gathering and analysis\n", + " - Writer: For compiling final report\n", + " Always send your plan first, then handoff to appropriate agent.\n", + " Handoff to a single agent at a time.\n", + " Use TERMINATE when research is complete.\"\"\",\n", + ")\n", + "\n", + "financial_analyst = AssistantAgent(\n", + " \"financial_analyst\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " tools=[get_stock_data],\n", + " system_message=\"\"\"You are a financial analyst.\n", + " Analyze stock market data using the get_stock_data tool.\n", + " Provide insights on financial metrics.\n", + " Always handoff back to planner when analysis is complete.\"\"\",\n", + ")\n", + "\n", + "news_analyst = AssistantAgent(\n", + " \"news_analyst\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " tools=[get_news],\n", + " system_message=\"\"\"You are a news analyst.\n", + " Gather and analyze relevant news using the get_news tool.\n", + " Summarize key market insights from news.\n", + " Always handoff back to planner when analysis is complete.\"\"\",\n", + ")\n", + "\n", + "writer = AssistantAgent(\n", + " \"writer\",\n", + " model_client=model_client,\n", + " handoffs=[\"planner\"],\n", + " system_message=\"\"\"You are a financial report writer.\n", + " Compile research findings into clear, concise reports.\n", + " Always handoff back to planner when writing is complete.\"\"\",\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 18, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- user 
----------\n", + "Conduct market research for TSLA stock\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_IXFe9RcGbYGNf0V7B2hvDNJI', arguments='{}', name='transfer_to_financial_analyst')]\n", + "[Prompt tokens: 168, Completion tokens: 149]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to financial_analyst, adopting the role of financial_analyst immediately.', call_id='call_IXFe9RcGbYGNf0V7B2hvDNJI')]\n", + "---------- planner ----------\n", + "Transferred to financial_analyst, adopting the role of financial_analyst immediately.\n", + "---------- financial_analyst ----------\n", + "[FunctionCall(id='call_2IYcTAXiufX1SBmnMJOG9HPq', arguments='{\"symbol\":\"TSLA\"}', name='get_stock_data')]\n", + "[Prompt tokens: 136, Completion tokens: 16]\n", + "---------- financial_analyst ----------\n", + "[FunctionExecutionResult(content=\"{'price': 180.25, 'volume': 1000000, 'pe_ratio': 65.4, 'market_cap': '700B'}\", call_id='call_2IYcTAXiufX1SBmnMJOG9HPq')]\n", + "---------- financial_analyst ----------\n", + "Here's the market research for TSLA (Tesla) stock:\n", + "\n", + "- **Current Price**: $180.25\n", + "- **Trading Volume**: 1,000,000 shares\n", + "- **Price to Earnings (P/E) Ratio**: 65.4\n", + "- **Market Capitalization**: $700 Billion\n", + "\n", + "These metrics can help evaluate Tesla's stock performance, value in the market, and overall investment appeal. If you need a specific analysis or additional data, feel free to let me know!\n", + "[Prompt tokens: 162, Completion tokens: 103]\n", + "---------- financial_analyst ----------\n", + "[FunctionCall(id='call_ji8SdlXI1uga2SNenIZMvPOR', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 310, Completion tokens: 12]\n", + "---------- financial_analyst ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_ji8SdlXI1uga2SNenIZMvPOR')]\n", + "---------- financial_analyst ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_aQUm1B1jzvnWF9aWLwfn2VxS', arguments='{}', name='transfer_to_news_analyst')]\n", + "[Prompt tokens: 346, Completion tokens: 14]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to news_analyst, adopting the role of news_analyst immediately.', call_id='call_aQUm1B1jzvnWF9aWLwfn2VxS')]\n", + "---------- planner ----------\n", + "Transferred to news_analyst, adopting the role of news_analyst immediately.\n", + "---------- news_analyst ----------\n", + "[FunctionCall(id='call_n5RmgbQgdyfE7EX5NUsKwApq', arguments='{\"query\":\"Tesla stock performance\"}', name='get_news')]\n", + "[Prompt tokens: 291, Completion tokens: 16]\n", + "---------- news_analyst ----------\n", + "[FunctionExecutionResult(content='[{\\'title\\': \\'Tesla Expands Cybertruck Production\\', \\'date\\': \\'2024-03-20\\', \\'summary\\': \\'Tesla ramps up Cybertruck manufacturing capacity at Gigafactory Texas, aiming to meet strong demand.\\'}, {\\'title\\': \\'Tesla FSD Beta Shows Promise\\', \\'date\\': \\'2024-03-19\\', \\'summary\\': \\'Latest Full Self-Driving beta demonstrates significant improvements in urban navigation and safety features.\\'}, {\\'title\\': \\'Model Y Dominates Global EV Sales\\', \\'date\\': \\'2024-03-18\\', \\'summary\\': \"Tesla\\'s Model Y becomes best-selling electric vehicle worldwide, capturing significant market share.\"}]', 
call_id='call_n5RmgbQgdyfE7EX5NUsKwApq')]\n", + "---------- news_analyst ----------\n", + "Here are some recent news articles related to TSLA (Tesla) stock that may influence its market performance:\n", + "\n", + "1. **Tesla Expands Cybertruck Production** (March 20, 2024)\n", + " - Tesla has ramped up its Cybertruck manufacturing capacity at the Gigafactory in Texas, aiming to meet the strong demand for this vehicle.\n", + "\n", + "2. **Tesla FSD Beta Shows Promise** (March 19, 2024)\n", + " - The latest Full Self-Driving (FSD) beta update demonstrates notable improvements in urban navigation and safety features, suggesting potential advancements in Tesla's autonomous driving technology.\n", + "\n", + "3. **Model Y Dominates Global EV Sales** (March 18, 2024)\n", + " - Tesla's Model Y has become the best-selling electric vehicle worldwide, capturing a significant share of the market, which could positively impact the company's revenue streams.\n", + "\n", + "If you'd like a more detailed analysis or further information, please let me know!\n", + "[Prompt tokens: 414, Completion tokens: 192]\n", + "---------- news_analyst ----------\n", + "[FunctionCall(id='call_7Ka5f5k2yZ8flfvZWKNXDQjL', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 654, Completion tokens: 12]\n", + "---------- news_analyst ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_7Ka5f5k2yZ8flfvZWKNXDQjL')]\n", + "---------- news_analyst ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "[FunctionCall(id='call_zl0E18TZWoCPykYqG7jpR2mr', arguments='{}', name='transfer_to_writer')]\n", + "[Prompt tokens: 611, Completion tokens: 11]\n", + "---------- planner ----------\n", + "[FunctionExecutionResult(content='Transferred to writer, adopting the role of writer immediately.', call_id='call_zl0E18TZWoCPykYqG7jpR2mr')]\n", + "---------- planner ----------\n", + "Transferred to writer, adopting the role of writer immediately.\n", + "---------- writer ----------\n", + "### Market Research Report: Tesla (TSLA) Stock\n", + "\n", + "#### Stock Performance Overview\n", + "\n", + "- **Current Price**: $180.25\n", + "- **Trading Volume**: 1,000,000 shares\n", + "- **Price to Earnings (P/E) Ratio**: 65.4\n", + "- **Market Capitalization**: $700 Billion\n", + "\n", + "Tesla's stock is currently valued at $180.25 per share, with a high trading volume reflecting active investor interest. The P/E ratio of 65.4 indicates that the stock might be viewed as overvalued compared to traditional industries, but it is common for tech and innovation-driven companies. The market capitalization of $700 billion underscores Tesla’s significant presence in the automotive and technology markets.\n", + "\n", + "#### Recent News Impacting Tesla (TSLA)\n", + "\n", + "1. **Expansion of Cybertruck Production**: Tesla has increased its Cybertruck production at the Texas Gigafactory, responding to heightened demand. This expansion strategy could drive future revenues and solidify Tesla's innovative image.\n", + "\n", + "2. **Advancements in Full Self-Driving (FSD) Technology**: The latest Tesla FSD beta update highlights promising improvements. Enhanced safety and urban navigation may bolster Tesla's reputation in the autonomous vehicle domain, potentially increasing its market value.\n", + "\n", + "3. 
**Model Y's Global Sales Leadership**: The Model Y has emerged as the leading global electric vehicle in terms of sales. This achievement not only boosts Tesla's revenue streams but also cements its position as a leader in the EV segment.\n", + "\n", + "### Conclusion\n", + "\n", + "Tesla’s market dynamics show strong innovation and consumer interest, which reflect positively in its stock market valuation and significant news coverage. The company continues to lead in the electric vehicle market and expand its capabilities in autonomous driving, thereby potentially increasing its financial performance and market leadership.\n", + "\n", + "For further analysis or inquiries, feel free to reach out.\n", + "[Prompt tokens: 489, Completion tokens: 371]\n", + "---------- writer ----------\n", + "[FunctionCall(id='call_9buNd5ud2MTRyX50X2EjQJqp', arguments='{}', name='transfer_to_planner')]\n", + "[Prompt tokens: 865, Completion tokens: 12]\n", + "---------- writer ----------\n", + "[FunctionExecutionResult(content='Transferred to planner, adopting the role of planner immediately.', call_id='call_9buNd5ud2MTRyX50X2EjQJqp')]\n", + "---------- writer ----------\n", + "Transferred to planner, adopting the role of planner immediately.\n", + "---------- planner ----------\n", + "TERMINATE\n", + "[Prompt tokens: 1037, Completion tokens: 4]\n", + "---------- Summary ----------\n", + "Number of messages: 27\n", + "Finish reason: Text 'TERMINATE' mentioned\n", + "Total prompt tokens: 5483\n", + "Total completion tokens: 912\n", + "Duration: 15.26 seconds\n" + ] + } + ], + "source": [ + "# Define termination condition\n", + "text_termination = TextMentionTermination(\"TERMINATE\")\n", + "termination = text_termination\n", + "\n", + "research_team = Swarm(\n", + " participants=[planner, financial_analyst, news_analyst, writer], termination_condition=termination\n", + ")\n", + "\n", + "task = \"Conduct market research for TSLA stock\"\n", + "await Console(research_team.run_stream(task=task))" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { + "kernelspec": { + "display_name": "autogen", + "language": "python", + "name": "python3" + }, "language_info": { - "name": "python" + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_customer_support.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_customer_support.svg new file mode 100644 index 000000000000..c2dcde2bba67 --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_customer_support.svg @@ -0,0 +1,3 @@ + + +
+ [SVG diagram: customer support swarm. The Application/User exchanges Handoff Messages with the Team (Travel Agent and Flights Refunder Agent); the Flights Refunder Agent uses the refund_flight tool.]
\ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_stock_research.svg b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_stock_research.svg new file mode 100644 index 000000000000..f75d43269caf --- /dev/null +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/swarm_stock_research.svg @@ -0,0 +1,3 @@ + + +
+ [SVG diagram: stock research swarm. The Planner exchanges Handoffs with the Financial Analyst (get_stock_data tool), the News Analyst (get_news tool), and the Writer.]
\ No newline at end of file diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb index 8d001f897038..51976a5370d1 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/teams.ipynb @@ -22,14 +22,14 @@ "\n", "AgentChat provides several preset teams that implements one or more [multi-agent design patterns](../../core-user-guide/design-patterns/index.md) to simplify development. Here is a list of the preset teams:\n", "\n", - "- {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`: All participants share context and takes turn to respond in a round-robin fashion.\n", - "- {py:class}`~autogen_agentchat.teams.SelectorGroupChat`: All participants share context and use a model-based selector (with custom override) to select the next agent to respond.\n", - "- {py:class}`~autogen_agentchat.teams.Swarm`: All participants share context and use {py:class}`~autogen_agentchat.messages.HandoffMessage`to pass control to the next agent.\n", + "- {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`: All participants share context and take turns to respond in a round-robin fashion. We will cover this team in this section.\n", + "- {py:class}`~autogen_agentchat.teams.SelectorGroupChat`: All participants share context and use a model-based selector (with custom override) to select the next agent to respond. See [Selector Group Chat](./selector-group-chat.ipynb) for more details.\n", + "- {py:class}`~autogen_agentchat.teams.Swarm`: All participants share context and use {py:class}`~autogen_agentchat.messages.HandoffMessage` to pass control to the next agent. See [Swarm](./swarm.ipynb) for more details.\n", "\n", "At a high-level, a team API consists of the following methods:\n", "\n", - "- {py:meth}`~autogen_agentchat.base.TaskRunner.run`: To process a task, which can be a {py:class}`str`, {py:class}`~autogen_agentchat.messages.TextMessage`, or {py:class}`~autogen_agentchat.messages.MultiModalMessage`, and returns {py:class}`~autogen_agentchat.base.TaskResult`. The task can also be `None` to resume processing the previous task if the team has not been reset.\n", - "- {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`: Same as {py:meth}`~autogen_agentchat.base.TaskRunner.run`, but returns a async generator of messages and the final task result.\n", + "- {py:meth}`~autogen_agentchat.base.TaskRunner.run`: Processes a task, which can be a {py:class}`str`, {py:class}`~autogen_agentchat.messages.TextMessage`, {py:class}`~autogen_agentchat.messages.MultiModalMessage`, or {py:class}`~autogen_agentchat.messages.HandoffMessage`, and returns a {py:class}`~autogen_agentchat.base.TaskResult`. The task can also be `None` to resume processing the previous task if the team has not been reset.\n", + "- {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`: Similar to {py:meth}`~autogen_agentchat.base.TaskRunner.run`, but it returns an async generator of messages and the final task result.\n", "- {py:meth}`~autogen_agentchat.base.Team.reset`: To reset the team state if the next task is not related to the previous task.
Otherwise, the team can utilize the context from the previous task to process the next one.\n", "\n", "In this section, we will be using the\n", @@ -42,6 +42,9 @@ "source": [ "## Round-Robin Group Chat\n", "\n", + "{py:class}`~autogen_agentchat.teams.RoundRobinGroupChat` is a simple team that allows all agents to share context and take turns to respond in a round-robin fashion.\n", + "On its turn, each agent broadcasts its response to all other agents in the team, so all agents have the same context.\n", + "\n", "We will start by creating a team with a single {py:class}`~autogen_agentchat.agents.AssistantAgent` agent\n", "and {py:class}`~autogen_agentchat.task.TextMentionTermination`\n", "termination condition that stops the team when a word is detected." @@ -281,7 +284,7 @@ "outputs": [], "source": [ "from autogen_agentchat.agents import AssistantAgent\n", - "from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination, Console\n", + "from autogen_agentchat.task import Console, MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", "from autogen_ext.models import OpenAIChatCompletionClient\n", @@ -764,16 +767,6 @@ "# Use `asyncio.run(Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\")))` when running in a script.\n", "await Console(lazy_agent_team.run_stream(task=\"It is raining in New York.\"))" ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "```{note}\n", - "Currently the handoff termination approach does not work with {py:class}`~autogen_agentchat.teams.Swarm`.\n", - "Please stay tuned for the updates.\n", - "```" - ] } ], "metadata": { diff --git a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb index e10942491286..67c7582a311e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/agentchat-user-guide/tutorial/termination.ipynb @@ -6,56 +6,77 @@ "source": [ "# Termination \n", "\n", - "\n", - "In the previous section, we explored how to define agents, and organize them into teams that can solve tasks by communicating (a conversation). However, conversations can go on forever, and in many cases, we need to know _when_ to stop them. This is the role of the termination condition.\n", + "In the previous section, we explored how to define agents and organize them into teams that can solve tasks. However, a run can go on forever, and in many cases, we need to know _when_ to stop it. This is the role of the termination condition.\n", "\n", "AgentChat supports several termination condition by providing a base {py:class}`~autogen_agentchat.base.TerminationCondition` class and several implementations that inherit from it.\n", "\n", - "A termination condition is a callable that takes a sequence of ChatMessage objects since the last time the condition was called, and returns a StopMessage if the conversation should be terminated, or None otherwise.
Once a termination condition has been reached, it must be reset before it can be used again.\n", + "A termination condition is a callable that takes a sequence of {py:class}`~autogen_agentchat.messages.AgentMessage` objects **since the last time the condition was called**, and returns a {py:class}`~autogen_agentchat.messages.StopMessage` if the conversation should be terminated, or `None` otherwise.\n", + "Once a termination condition has been reached, it must be reset by calling {py:meth}`~autogen_agentchat.base.TerminationCondition.reset` before it can be used again.\n", "\n", "Some important things to note about termination conditions: \n", - "- They are stateful, and must be reset before they can be used again. \n", - "- They can be combined using the AND and OR operators. \n", - "- They are implemented/enforced by the team, and not by the agents. An agent may signal or request termination e.g., by sending a StopMessage, but the team is responsible for enforcing it.\n" + "- They are stateful but reset automatically after each run ({py:meth}`~autogen_agentchat.base.TaskRunner.run` or {py:meth}`~autogen_agentchat.base.TaskRunner.run_stream`) is finished.\n", + "- They can be combined using the AND and OR operators.\n", + "\n", + "```{note}\n", + "For group chat teams (i.e., {py:class}`~autogen_agentchat.teams.RoundRobinGroupChat`,\n", + "{py:class}`~autogen_agentchat.teams.SelectorGroupChat`, and {py:class}`~autogen_agentchat.teams.Swarm`),\n", + "the termination condition is called after each agent responds.\n", + "While a response may contain multiple inner messages, the team calls its termination condition just once for all the messages from a single response.\n", + "So the condition is called with the \"delta sequence\" of messages since the last time it was called.\n", + "```" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ "AutoGen provides several built-in termination conditions: \n", "1. {py:class}`~autogen_agentchat.task.MaxMessageTermination`: Stops after a specified number of messages have been produced, including both agent and task messages.\n", "2. {py:class}`~autogen_agentchat.task.TextMentionTermination`: Stops when specific text or string is mentioned in a message (e.g., \"TERMINATE\").\n", "3. {py:class}`~autogen_agentchat.task.TokenUsageTermination`: Stops when a certain number of prompt or completion tokens are used. This requires the agents to report token usage in their messages.\n", "4. {py:class}`~autogen_agentchat.task.TimeoutTermination`: Stops after a specified duration in seconds.\n", "5. {py:class}`~autogen_agentchat.task.HandoffTermination`: Stops when a handoff to a specific target is requested. Handoff messages can be used to build patterns such as {py:class}`~autogen_agentchat.teams.Swarm`. This is useful when you want to pause the run and allow the application or user to provide input when an agent hands off to them.\n", "6. {py:class}`~autogen_agentchat.task.SourceMatchTermination`: Stops after a specific agent responds.\n", "7. {py:class}`~autogen_agentchat.task.ExternalTermination`: Enables programmatic control of termination from outside the run. This is useful for UI integration (e.g., \"Stop\" buttons in chat interfaces).\n", "8. {py:class}`~autogen_agentchat.task.StopMessageTermination`: Stops when a {py:class}`~autogen_agentchat.messages.StopMessage` is produced by an agent."
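The callable contract described above also makes custom conditions straightforward to write. Below is a minimal sketch, assuming the `TerminationCondition` base class and `TerminatedException` exposed by `autogen_agentchat.base` in this dev release; the `ToolCallMessage` type-name check is an illustrative assumption, not something established by the diff above.

```python
from typing import Sequence

from autogen_agentchat.base import TerminatedException, TerminationCondition
from autogen_agentchat.messages import AgentMessage, StopMessage


class ToolCallTermination(TerminationCondition):
    """Stop the run as soon as any agent requests a tool call (illustrative only)."""

    def __init__(self) -> None:
        self._terminated = False

    @property
    def terminated(self) -> bool:
        return self._terminated

    async def __call__(self, messages: Sequence[AgentMessage]) -> StopMessage | None:
        if self._terminated:
            raise TerminatedException("Termination condition has already been reached.")
        # The team passes only the "delta sequence" of messages since the last call.
        for message in messages:
            if type(message).__name__ == "ToolCallMessage":  # assumed message type name
                self._terminated = True
                return StopMessage(content="Tool call detected.", source="ToolCallTermination")
        return None

    async def reset(self) -> None:
        self._terminated = False
```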
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "To begin, let us define a simple team with only one agent and then explore how multiple termination conditions can be applied to guide the resulting behavior." + "To demonstrate the characteristics of termination conditions, we'll create a team consisting of two agents: a primary agent responsible for text generation and a critic agent that reviews and provides feedback on the generated text." ] }, { "cell_type": "code", - "execution_count": null, + "execution_count": 3, "metadata": {}, "outputs": [], "source": [ - "import logging\n", - "\n", - "from autogen_agentchat import EVENT_LOGGER_NAME\n", - "from autogen_agentchat.agents import CodingAssistantAgent\n", - "from autogen_agentchat.logging import ConsoleLogHandler\n", - "from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination\n", + "from autogen_agentchat.agents import AssistantAgent\n", + "from autogen_agentchat.task import Console, MaxMessageTermination, TextMentionTermination\n", "from autogen_agentchat.teams import RoundRobinGroupChat\n", - "from autogen_core.components.models import OpenAIChatCompletionClient\n", - "\n", - "logger = logging.getLogger(EVENT_LOGGER_NAME)\n", - "logger.addHandler(ConsoleLogHandler())\n", - "logger.setLevel(logging.INFO)\n", - "\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", "model_client = OpenAIChatCompletionClient(\n", - " model=\"gpt-4o-2024-08-06\",\n", + " model=\"gpt-4o\",\n", " temperature=1,\n", " # api_key=\"sk-...\", # Optional if you have an OPENAI_API_KEY env variable set.\n", ")\n", "\n", - "writing_assistant_agent = CodingAssistantAgent(\n", - " name=\"writing_assistant_agent\",\n", - " system_message=\"You are a helpful assistant that solve tasks by generating text responses and code.\",\n", + "# Create the primary agent.\n", + "primary_agent = AssistantAgent(\n", + " \"primary\",\n", + " model_client=model_client,\n", + " system_message=\"You are a helpful AI assistant.\",\n", + ")\n", + "\n", + "# Create the critic agent.\n", + "critic_agent = AssistantAgent(\n", + " \"critic\",\n", " model_client=model_client,\n", + " system_message=\"Provide constructive feedback for every message. Respond with 'APPROVE' when your feedback is addressed.\",\n", ")" ] }, @@ -63,9 +84,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## MaxMessageTermination \n", - "\n", - "The simplest termination condition is the {py:class}`~autogen_agentchat.teams.MaxMessageTermination` condition, which terminates the conversation after a fixed number of messages. \n" + "Let's explore how termination conditions automatically reset after each `run` or `run_stream` call, allowing the team to resume its conversation from where it left off."
] }, { @@ -77,62 +96,117 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:28.807176]:\u001b[0m\n", - "\n", + "---------- user ----------\n", "Write a unique, Haiku about the weather in Paris\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:29.604935], writing_assistant_agent:\u001b[0m\n", - "\n", + "---------- primary ----------\n", "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:30.168531], writing_assistant_agent:\u001b[0m\n", + "Cobblestones glisten softly— \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 30, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. Consider revising the first line to fit the structure.\n", "\n", - "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.213291], writing_assistant_agent:\u001b[0m\n", - "\n", - "Gentle rain whispers, \n", - "Eiffel veiled in mist’s embrace, \n", - "Spring’s soft sigh in France.\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.213655], Termination:\u001b[0m\n", + "For example:\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", "\n", - "Maximal number of messages 3 reached, current message count: 3" + "This revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\n", + "[Prompt tokens: 70, Completion tokens: 120]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 100\n", + "Total completion tokens: 139\n", + "Duration: 3.34 seconds\n" ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=30, completion_tokens=19), content='Gentle rain whispers, \\nCobblestones glisten softly— \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=70, completion_tokens=120), content=\"The Haiku captures the essence of a rainy day in Paris beautifully, and the imagery is vivid. However, it's important to ensure the use of the traditional 5-7-5 syllable structure for Haikus. Your current Haiku lines are composed of 4-7-5 syllables, which slightly deviates from the form. 
Consider revising the first line to fit the structure.\\n\\nFor example:\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.\\n\\nThis revision maintains the essence of your original lines while adhering to the traditional Haiku structure.\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 4, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ "max_msg_termination = MaxMessageTermination(max_messages=3)\n", - "round_robin_team = RoundRobinGroupChat([writing_assistant_agent], termination_condition=max_msg_termination)\n", - "round_robin_team_result = await round_robin_team.run(task=\"Write a unique, Haiku about the weather in Paris\")" + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=max_msg_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "We see that the conversation is terminated after the specified number of messages have been sent by the agent." + "The conversation stopped after reaching the maximum message limit. Since the primary agent didn't get to respond to the feedback, let's continue the conversation." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "---------- primary ----------\n", + "Thank you for your feedback. Here is the revised Haiku:\n", + "\n", + "Soft rain whispers down, \n", + "Cobblestones glisten softly — \n", + "Paris dreams in gray.\n", + "[Prompt tokens: 181, Completion tokens: 32]\n", + "---------- critic ----------\n", + "The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! \n", + "\n", + "APPROVE\n", + "[Prompt tokens: 234, Completion tokens: 54]\n", + "---------- primary ----------\n", + "Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\n", + "[Prompt tokens: 279, Completion tokens: 39]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Maximum number of messages 3 reached, current message count: 3\n", + "Total prompt tokens: 694\n", + "Total completion tokens: 125\n", + "Duration: 6.43 seconds\n" + ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=181, completion_tokens=32), content='Thank you for your feedback. Here is the revised Haiku:\\n\\nSoft rain whispers down, \\nCobblestones glisten softly — \\nParis dreams in gray.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=234, completion_tokens=54), content='The revised Haiku now follows the traditional 5-7-5 syllable pattern, and it still beautifully captures the atmospheric mood of Paris in the rain. The imagery and flow are both clear and evocative. Well done on making the adjustment! 
\\n\\nAPPROVE'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=279, completion_tokens=39), content=\"Thank you for your kind words and approval. I'm glad the revision meets your expectations and captures the essence of Paris. If you have any more requests or need further assistance, feel free to ask!\")], stop_reason='Maximum number of messages 3 reached, current message count: 3')" + ] + }, + "execution_count": 5, + "metadata": {}, + "output_type": "execute_result" + } + ], + "source": [ + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream())" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## StopMessageTermination\n", - "\n", - "In this scenario, the team terminates the conversation if any agent sends a `StopMessage`. So, when does an agent send a `StopMessage`? Typically, this is implemented in the `on_message` method of the agent, where the agent can check the incoming message and decide to send a `StopMessage` based on some condition. \n", - "\n", - "A common pattern here is prompt the agent (or some agent participating in the conversation) to emit a specific text string in it's response, which can be used to trigger the termination condition. \n", - "\n", - "In fact, if you review the code implementation for the default `CodingAssistantAgent` class provided by AgentChat, you will observe two things\n", - "- The default `system_message` instructs the agent to end their response with the word \"terminate\" if they deem the task to be completed\n", - "- in the `on_message` method, the agent checks if the incoming message contains the text \"terminate\" and returns a `StopMessage` if it does. " + "The team continued from where it left off, allowing the primary agent to respond to the feedback." + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Next, let's show how termination conditions can be combined using the AND (`&`) and OR (`|`) operators to create more complex termination logic. For example, we'll create a team that stops either after 10 messages are generated or when the critic agent approves a message.\n" ] }, { @@ -144,37 +218,64 @@ "name": "stdout", "output_type": "stream", "text": [ - "\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.218855]:\u001b[0m\n", - "\n", + "---------- user ----------\n", "Write a unique, Haiku about the weather in Paris\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.752676], writing_assistant_agent:\u001b[0m\n", - "\n", - "Mist hugs the Eiffel, \n", - "Soft rain kisses cobblestones, \n", - "Autumn whispers past. \n", - "\n", - "TERMINATE\n", - "--------------------------------------------------------------------------- \n", - "\u001b[91m[2024-10-19T12:19:31.753265], Termination:\u001b[0m\n", + "---------- primary ----------\n", + "Spring breeze gently hums, \n", + "Cherry blossoms in full bloom— \n", + "Paris wakes to life.\n", + "[Prompt tokens: 467, Completion tokens: 19]\n", + "---------- critic ----------\n", + "The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. 
The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\n", "\n", - "Stop message received" + "APPROVE\n", + "[Prompt tokens: 746, Completion tokens: 93]\n", + "---------- Summary ----------\n", + "Number of messages: 3\n", + "Finish reason: Text 'APPROVE' mentioned\n", + "Total prompt tokens: 1213\n", + "Total completion tokens: 112\n", + "Duration: 2.75 seconds\n" ] + }, + { + "data": { + "text/plain": [ + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Write a unique, Haiku about the weather in Paris'), TextMessage(source='primary', models_usage=RequestUsage(prompt_tokens=467, completion_tokens=19), content='Spring breeze gently hums, \\nCherry blossoms in full bloom— \\nParis wakes to life.'), TextMessage(source='critic', models_usage=RequestUsage(prompt_tokens=746, completion_tokens=93), content='The Haiku beautifully captures the awakening of Paris in the spring. The imagery of a gentle spring breeze and cherry blossoms in full bloom effectively conveys the rejuvenating feel of the season. The final line, \"Paris wakes to life,\" encapsulates the renewed energy and vibrancy of the city. The Haiku adheres to the 5-7-5 syllable structure and portrays a vivid seasonal transformation in a concise and poetic manner. Excellent work!\\n\\nAPPROVE')], stop_reason=\"Text 'APPROVE' mentioned\")" + ] + }, + "execution_count": 9, + "metadata": {}, + "output_type": "execute_result" } ], "source": [ - "writing_assistant_agent = CodingAssistantAgent(\n", - " name=\"writing_assistant_agent\",\n", - " system_message=\"You are a helpful assistant that solve tasks by generating text responses and code. Respond with TERMINATE when the task is done.\",\n", - " model_client=model_client,\n", - ")\n", + "max_msg_termination = MaxMessageTermination(max_messages=10)\n", + "text_termination = TextMentionTermination(\"APPROVE\")\n", + "combined_termination = max_msg_termination | text_termination\n", "\n", - "text_termination = TextMentionTermination(\"TERMINATE\")\n", - "round_robin_team = RoundRobinGroupChat([writing_assistant_agent], termination_condition=text_termination)\n", + "round_robin_team = RoundRobinGroupChat([primary_agent, critic_agent], termination_condition=combined_termination)\n", + "\n", + "# Use asyncio.run(...) if you are running this script as a standalone script.\n", + "await Console(round_robin_team.run_stream(task=\"Write a unique, Haiku about the weather in Paris\"))" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The conversation stopped after the critic agent approved the message, although it could have also stopped if 10 messages were generated.\n", "\n", - "round_robin_team_result = await round_robin_team.run(task=\"Write a unique, Haiku about the weather in Paris\")" + "Alternatively, if we want to stop the run only when both conditions are met, we can use the AND (`&`) operator." 
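Because `|` and `&` each return a new `TerminationCondition`, compositions also nest. The cell below shows the plain `&` form; here is a hedged sketch combining three of the built-ins listed earlier (the argument values are chosen purely for illustration):

```python
from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination, TimeoutTermination

# Stop when "APPROVE" is mentioned AND at least 10 messages have been produced,
# or in any case once 300 seconds have elapsed, whichever happens first.
nested_termination = (
    TextMentionTermination("APPROVE") & MaxMessageTermination(max_messages=10)
) | TimeoutTermination(300)
```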
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "combined_termination = max_msg_termination & text_termination" ] } ], @@ -194,7 +295,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.12.6" + "version": "3.11.5" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md index c8e4b632bd03..b347ed7de251 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/azure-openai-with-aad-auth.md @@ -24,15 +24,11 @@ token_provider = get_bearer_token_provider( ) client = AzureOpenAIChatCompletionClient( - model="{your-azure-deployment}", + azure_deployment="{your-azure-deployment}", + model="{model-name, such as gpt-4o}", api_version="2024-02-01", azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/", azure_ad_token_provider=token_provider, - model_capabilities={ - "vision":True, - "function_calling":True, - "json_output":True, - } ) ``` diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb index a90cb440d6ce..80fde2b71017 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/local-llms-ollama-litellm.ipynb @@ -46,10 +46,10 @@ "from autogen_core.components.models import (\n", " AssistantMessage,\n", " ChatCompletionClient,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", - ")" + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { @@ -65,7 +65,7 @@ "metadata": {}, "outputs": [], "source": [ - "def get_model_client() -> OpenAIChatCompletionClient:\n", + "def get_model_client() -> OpenAIChatCompletionClient: # type: ignore\n", " \"Mimic OpenAI API using Local LLM Server.\"\n", " return OpenAIChatCompletionClient(\n", " model=\"gpt-4o\", # Need to use one of the OpenAI models as a placeholder for now.\n", @@ -233,7 +233,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.9" + "version": "3.12.7" } }, "nbformat": 4, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb index fa50d6da2797..95edbfa0c257 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/cookbook/structured-output-agent.ipynb @@ -57,7 +57,7 @@ }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -79,15 +79,11 @@ "\n", "# Create the client with type-checked environment variables\n", "client = AzureOpenAIChatCompletionClient(\n", - " model=get_env_variable(\"AZURE_OPENAI_DEPLOYMENT_NAME\"),\n", + " azure_deployment=get_env_variable(\"AZURE_OPENAI_DEPLOYMENT_NAME\"),\n", + " 
model=get_env_variable(\"AZURE_OPENAI_MODEL\"),\n", " api_version=get_env_variable(\"AZURE_OPENAI_API_VERSION\"),\n", " azure_endpoint=get_env_variable(\"AZURE_OPENAI_ENDPOINT\"),\n", " api_key=get_env_variable(\"AZURE_OPENAI_API_KEY\"),\n", - " model_capabilities={\n", - " \"vision\": False,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", ")" ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md index 3346cb68cb56..d3e38802ae3b 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/core-concepts/architecture.md @@ -1,6 +1,6 @@ # Agent Runtime Environments -At the foundation level, the framework provides a _runtime envionment_, which facilitates +At the foundation level, the framework provides a _runtime environment_, which facilitates communication between agents, manages their identities and lifecycles, and enforce security and privacy boundaries. diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb index dff7d18bd424..61b8b62bc221 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/mixture-of-agents.ipynb @@ -41,7 +41,8 @@ "from autogen_core.application import SingleThreadedAgentRuntime\n", "from autogen_core.base import AgentId, MessageContext\n", "from autogen_core.components import RoutedAgent, message_handler\n", - "from autogen_core.components.models import ChatCompletionClient, OpenAIChatCompletionClient, SystemMessage, UserMessage" + "from autogen_core.components.models import ChatCompletionClient, SystemMessage, UserMessage\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb index 3120363dd23b..72b653687915 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/design-patterns/multi-agent-debate.ipynb @@ -50,10 +50,10 @@ " AssistantMessage,\n", " ChatCompletionClient,\n", " LLMMessage,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", - ")" + ")\n", + "from autogen_ext.models import OpenAIChatCompletionClient" ] }, { diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb index 833799c2096a..8bdc396a3196 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/distributed-agent-runtime.ipynb @@ -21,6 +21,13 @@ "It also advertises the agents which they support to the host service,\n", "so the host service can deliver messages to the 
correct worker.\n", "\n", + "````{note}\n", + "The distributed agent runtime requires extra dependencies; install them using:\n", + "```bash\n", + "pip install autogen-core[grpc]==0.4.0.dev7\n", + "```\n", + "````\n", + "\n", "We can start a host service using {py:class}`~autogen_core.application.WorkerAgentRuntimeHost`." ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb index cb17886b964a..e074eb970e0e 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/model-clients.ipynb @@ -283,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 15, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -294,16 +294,12 @@ "token_provider = get_bearer_token_provider(DefaultAzureCredential(), \"https://cognitiveservices.azure.com/.default\")\n", "\n", "az_model_client = AzureOpenAIChatCompletionClient(\n", - " model=\"{your-azure-deployment}\",\n", + " azure_deployment=\"{your-azure-deployment}\",\n", + " model=\"{model-name, such as gpt-4o}\",\n", " api_version=\"2024-06-01\",\n", " azure_endpoint=\"https://{your-custom-endpoint}.openai.azure.com/\",\n", " azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication.\n", " # api_key=\"sk-...\", # For key-based authentication.\n", - " model_capabilities={\n", - " \"vision\": True,\n", - " \"function_calling\": True,\n", - " \"json_output\": True,\n", - " },\n", ")" ] }, diff --git a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb index 183d878e4c8f..ff24095e8b50 100644 --- a/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb +++ b/python/packages/autogen-core/docs/src/user-guide/core-user-guide/framework/tools.ipynb @@ -161,12 +161,12 @@ "from autogen_core.components.models import (\n", " ChatCompletionClient,\n", " LLMMessage,\n", - " OpenAIChatCompletionClient,\n", " SystemMessage,\n", " UserMessage,\n", ")\n", "from autogen_core.components.tool_agent import ToolAgent, tool_agent_caller_loop\n", "from autogen_core.components.tools import FunctionTool, Tool, ToolSchema\n", + "from autogen_ext.models import OpenAIChatCompletionClient\n", "\n", "\n", "@dataclass\n", diff --git a/python/packages/autogen-core/pyproject.toml b/python/packages/autogen-core/pyproject.toml index 9d564ded4b88..8727f5ee0c25 100644 --- a/python/packages/autogen-core/pyproject.toml +++ b/python/packages/autogen-core/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-core" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "Foundational interfaces and agent runtime implementation for AutoGen" readme = "README.md" @@ -20,7 +20,6 @@ dependencies = [ "aiohttp", "typing-extensions", "pydantic<3.0.0,>=2.0.0", - "grpcio~=1.62.0", "protobuf~=4.25.1", "tiktoken", "opentelemetry-api~=1.27.0", @@ -28,6 +27,11 @@ dependencies = [ "jsonref~=1.1.0", ] +[project.optional-dependencies] +grpc = [ + "grpcio~=1.62.0", +] + [tool.uv] dev-dependencies = [ "aiofiles", @@ -68,6 +72,7 @@ dev-dependencies = [ "sphinx", "sphinxcontrib-apidoc", "autodoc_pydantic~=2.2", + "pygments", # Documentation tooling
"sphinx-autobuild", @@ -76,7 +81,7 @@ dev-dependencies = [ [tool.ruff] extend = "../../pyproject.toml" -exclude = ["build", "dist", "src/autogen_core/application/protos"] +exclude = ["build", "dist", "src/autogen_core/application/protos", "tests/protos"] include = ["src/**", "samples/*.py", "docs/**/*.ipynb", "tests/**"] [tool.ruff.lint.per-file-ignores] @@ -86,7 +91,7 @@ include = ["src/**", "samples/*.py", "docs/**/*.ipynb", "tests/**"] [tool.pyright] extends = "../../pyproject.toml" include = ["src", "tests", "samples"] -exclude = ["src/autogen_core/application/protos"] +exclude = ["src/autogen_core/application/protos", "tests/protos"] reportDeprecated = false [tool.pytest.ini_options] @@ -106,7 +111,7 @@ include = "../../shared_tasks.toml" test = "pytest -n auto" mypy.default_item_type = "cmd" mypy.sequence = [ - "mypy --config-file ../../pyproject.toml --exclude src/autogen_core/application/protos src tests", + "mypy --config-file ../../pyproject.toml --exclude src/autogen_core/application/protos --exclude tests/protos src tests", "nbqa mypy docs/src --config-file ../../pyproject.toml", ] @@ -149,3 +154,10 @@ ref = "docs-apidoc-all" [[tool.poe.tasks.docs-check.sequence]] cmd = "sphinx-build --fail-on-warning docs/src docs/build" + +[[tool.poe.tasks.docs-check-examples.sequence]] +ref = "docs-apidoc-all" + +[[tool.poe.tasks.docs-check-examples.sequence]] +cmd = "sphinx-build -b code_lint docs/src docs/build" + diff --git a/python/packages/autogen-core/samples/chess_game.py b/python/packages/autogen-core/samples/chess_game.py index 91ef9c8d0900..e0bc95e1d572 100644 --- a/python/packages/autogen-core/samples/chess_game.py +++ b/python/packages/autogen-core/samples/chess_game.py @@ -52,6 +52,7 @@ def get_legal_moves( def get_board(board: Board) -> str: + """Get the current board state.""" return str(board) @@ -63,25 +64,25 @@ def make_move( ) -> Annotated[str, "Result of the move."]: """Make a move on the board.""" validate_turn(board, player) - newMove = Move.from_uci(move) - board.push(newMove) + new_move = Move.from_uci(move) + board.push(new_move) # Print the move. print("-" * 50) print("Player:", player) - print("Move:", newMove.uci()) + print("Move:", new_move.uci()) print("Thinking:", thinking) print("Board:") print(board.unicode(borders=True)) # Get the piece name. - piece = board.piece_at(newMove.to_square) + piece = board.piece_at(new_move.to_square) assert piece is not None piece_symbol = piece.unicode_symbol() piece_name = get_piece_name(piece.piece_type) if piece_symbol.isupper(): piece_name = piece_name.capitalize() - return f"Moved {piece_name} ({piece_symbol}) from {SQUARE_NAMES[newMove.from_square]} to {SQUARE_NAMES[newMove.to_square]}." + return f"Moved {piece_name} ({piece_symbol}) from {SQUARE_NAMES[new_move.from_square]} to {SQUARE_NAMES[new_move.to_square]}." 
async def chess_game(runtime: AgentRuntime) -> None: # type: ignore @@ -152,7 +153,8 @@ def get_board_text() -> Annotated[str, "The current board state"]: ), ] - await runtime.register( + await ChatCompletionAgent.register( + runtime, "PlayerBlack", lambda: ChatCompletionAgent( description="Player playing black.", @@ -168,9 +170,11 @@ def get_board_text() -> Annotated[str, "The current board state"]: model_client=get_chat_completion_client_from_envs(model="gpt-4o"), tools=black_tools, ), - lambda: [DefaultSubscription()], ) - await runtime.register( + await runtime.add_subscription(DefaultSubscription(agent_type="PlayerBlack")) + + await ChatCompletionAgent.register( + runtime, "PlayerWhite", lambda: ChatCompletionAgent( description="Player playing white.", @@ -186,11 +190,13 @@ def get_board_text() -> Annotated[str, "The current board state"]: model_client=get_chat_completion_client_from_envs(model="gpt-4o"), tools=white_tools, ), - lambda: [DefaultSubscription()], ) + await runtime.add_subscription(DefaultSubscription(agent_type="PlayerWhite")) + # Create a group chat manager for the chess game to orchestrate a turn-based # conversation between the two agents. - await runtime.register( + await GroupChatManager.register( + runtime, "ChessGame", lambda: GroupChatManager( description="A chess game between two agents.", @@ -200,16 +206,21 @@ def get_board_text() -> Annotated[str, "The current board state"]: AgentId("PlayerBlack", AgentInstantiationContext.current_agent_id().key), ], # white goes first ), - lambda: [DefaultSubscription()], ) + await runtime.add_subscription(DefaultSubscription(agent_type="ChessGame")) async def main() -> None: + """Main Entrypoint.""" runtime = SingleThreadedAgentRuntime() await chess_game(runtime) runtime.start() - # Publish an initial message to trigger the group chat manager to start orchestration. - await runtime.publish_message(TextMessage(content="Game started.", source="System"), topic_id=DefaultTopicId()) + # Publish an initial message to trigger the group chat manager to start + # orchestration. 
+ await runtime.publish_message( + TextMessage(content="Game started.", source="System"), + topic_id=DefaultTopicId(), + ) await runtime.stop_when_idle() diff --git a/python/packages/autogen-core/samples/common/utils.py b/python/packages/autogen-core/samples/common/utils.py index 4e77ac33232e..0765ceec561a 100644 --- a/python/packages/autogen-core/samples/common/utils.py +++ b/python/packages/autogen-core/samples/common/utils.py @@ -3,14 +3,13 @@ from autogen_core.components.models import ( AssistantMessage, - AzureOpenAIChatCompletionClient, ChatCompletionClient, FunctionExecutionResult, FunctionExecutionResultMessage, LLMMessage, - OpenAIChatCompletionClient, UserMessage, ) +from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from azure.identity import DefaultAzureCredential, get_bearer_token_provider from typing_extensions import Literal diff --git a/python/packages/autogen-core/samples/distributed-group-chat/README.md b/python/packages/autogen-core/samples/distributed-group-chat/README.md index 43d17ee0ebd9..b4cf16583996 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/README.md +++ b/python/packages/autogen-core/samples/distributed-group-chat/README.md @@ -31,7 +31,7 @@ The [run.sh](./run.sh) file provides commands to run the host and agents using [ Here is a screen recording of the execution: -[![Distributed Group Chat Demo with Simple UI Integration](https://img.youtube.com/vi/kLTzI-3VgPQ/0.jpg)](https://youtu.be/kLTzI-3VgPQ) +[![Distributed Group Chat Demo with Simple UI Integration](https://img.youtube.com/vi/503QJ1onV8I/0.jpg)](https://youtu.be/503QJ1onV8I?feature=shared) **Note**: Some `asyncio.sleep` commands have been added to the example code to make the `./run.sh` execution look sequential and visually easy to follow. In practice, these lines are not necessary. @@ -40,14 +40,16 @@ Here is a screen recording of the execution: If you prefer to run Python files individually, follow these steps. Note that each step must be run in a different terminal process, and the virtual environment should be activated using `source .venv/bin/activate`. 1. `python run_host.py`: Starts the host and listens for agent connections. -2. `python run_editor.py`: Starts the editor agent and connects it to the host. -3. `python run_writer.py`: Starts the writer agent and connects it to the host. -4. `chainlit run run_group_chat_manager.py --port 8001`: Run chainlit app which starts group chat manager agent and sends the initial message to start the conversation. We're using port 8001 as the default port 8000 is used to run host (assuming using same machine to run all of the agents) +2. `chainlit run run_ui.py --port 8001`: Starts the Chainlit app and UI agent and listens on UI topic to display messages. We're using port 8001 as the default port 8000 is used to run host (assuming using same machine to run all of the agents) +3. `python run_editor.py`: Starts the editor agent and connects it to the host. +4. `python run_writer.py`: Starts the writer agent and connects it to the host. +5. `python run_group_chat_manager.py`: Run chainlit app which starts group chat manager agent and sends the initial message to start the conversation. ## What's Going On? The general flow of this example is as follows: +0. The UI Agent runs starts the UI App, listens for stream of messages in the UI topic and displays them in the UI. 1. The Group Chat Manager, on behalf of `User`, sends a `RequestToSpeak` request to the `writer_agent`. 2. 
The `writer_agent` writes a short sentence into the group chat topic. 3. The `editor_agent` receives the message in the group chat topic and updates its memory. @@ -64,41 +66,49 @@ graph TD; A1[GRPC Server] wt[Writer Topic] et[Editor Topic] + ut[UI Topic] gct[Group Chat Topic] end + all_agents[All Agents - Simplified Arrows!] --> A1 subgraph Distributed Writer Runtime - writer_agent[ Writer Agent] --> A1 wt -.->|2 - Subscription| writer_agent gct -.->|4 - Subscription| writer_agent - writer_agent -.->|3 - Publish: Group Chat Message| gct + writer_agent -.->|3.1 - Publish: UI Message| ut + writer_agent -.->|3.2 - Publish: Group Chat Message| gct end subgraph Distributed Editor Runtime - editor_agent[ Editor Agent] --> A1 et -.->|6 - Subscription| editor_agent gct -.->|4 - Subscription| editor_agent - editor_agent -.->|7 - Publish: Group Chat Message| gct + editor_agent -.->|7.1 - Publish: UI Message| ut + editor_agent -.->|7.2 - Publish: Group Chat Message| gct end subgraph Distributed Group Chat Manager Runtime - group_chat_manager[ Group Chat Manager Agent] --> A1 gct -.->|4 - Subscription| group_chat_manager group_chat_manager -.->|1 - Request To Speak| wt group_chat_manager -.->|5 - Request To Speak| et + group_chat_manager -.->|\* - Publish Some Messages to UI| ut end + subgraph Distributed UI Runtime + ut -.->|\* - Subscription| ui_agent + end + + style wt fill:#beb2c3,color:#000 style et fill:#beb2c3,color:#000 style gct fill:#beb2c3,color:#000 + style ut fill:#beb2c3,color:#000 style writer_agent fill:#b7c4d7,color:#000 style editor_agent fill:#b7c4d7,color:#000 style group_chat_manager fill:#b7c4d7,color:#000 + style ui_agent fill:#b7c4d7,color:#000 ``` ## TODO: - [ ] Properly handle chat restarts. It complains about group chat manager being already registered -- [ ] Send Chainlit messages within each agent (Currently the manager can just sends messages in the group chat topic) -- [ ] Add streaming to the UI like [this example](https://docs.chainlit.io/advanced-features/streaming) but Autogen's Open AI Client [does not supporting streaming yet](https://github.com/microsoft/autogen/blob/0f4dd0cc6dd3eea303ad3d2063979b4b9a1aacfc/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py#L81) +- [ ] Add streaming to the UI like [this example](https://docs.chainlit.io/advanced-features/streaming) when [this bug](https://github.com/microsoft/autogen/issues/4213) is resolved diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_agents.py b/python/packages/autogen-core/samples/distributed-group-chat/_agents.py index 89ac74555967..8ff935600bab 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_agents.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_agents.py @@ -1,6 +1,10 @@ +import asyncio +import random from typing import Awaitable, Callable, List +from uuid import uuid4 -from _types import GroupChatMessage, RequestToSpeak +from _types import GroupChatMessage, MessageChunk, RequestToSpeak, UIAgentConfig +from autogen_core.application import WorkerAgentRuntime from autogen_core.base import MessageContext from autogen_core.components import DefaultTopicId, RoutedAgent, message_handler from autogen_core.components.models import ( @@ -23,12 +27,15 @@ def __init__( self, description: str, group_chat_topic_type: str, model_client: ChatCompletionClient, system_message: str, + ui_config: UIAgentConfig, ) -> None: super().__init__(description=description) self._group_chat_topic_type = group_chat_topic_type self._model_client =
model_client self._system_message = SystemMessage(system_message) self._chat_history: List[LLMMessage] = [] + self._ui_config = ui_config + self.console = Console() @message_handler async def handle_message(self, message: GroupChatMessage, ctx: MessageContext) -> None: @@ -47,11 +54,16 @@ async def handle_request_to_speak(self, message: RequestToSpeak, ctx: MessageCon completion = await self._model_client.create([self._system_message] + self._chat_history) assert isinstance(completion.content, str) self._chat_history.append(AssistantMessage(content=completion.content, source=self.id.type)) - Console().print(Markdown(f"**{self.id.type}**: {completion.content}\n")) - await self.publish_message( - GroupChatMessage(body=UserMessage(content=completion.content, source=self.id.type)), - topic_id=DefaultTopicId(type=self._group_chat_topic_type), + console_message = f"\n{'-'*80}\n**{self.id.type}**: {completion.content}" + self.console.print(Markdown(console_message)) + + await publish_message_to_ui_and_backend( + runtime=self, + source=self.id.type, + user_message=completion.content, + ui_config=self._ui_config, + group_chat_topic_type=self._group_chat_topic_type, ) @@ -61,7 +73,7 @@ def __init__( model_client: ChatCompletionClient, participant_topic_types: List[str], participant_descriptions: List[str], - on_message_func: Callable[[str, str], Awaitable[None]], + ui_config: UIAgentConfig, max_rounds: int = 3, ) -> None: super().__init__("Group chat manager") @@ -71,14 +83,14 @@ def __init__( self._chat_history: List[GroupChatMessage] = [] self._max_rounds = max_rounds self.console = Console() - self._on_message_func = on_message_func self._participant_descriptions = participant_descriptions self._previous_participant_topic_type: str | None = None + self._ui_config = ui_config @message_handler async def handle_message(self, message: GroupChatMessage, ctx: MessageContext) -> None: assert isinstance(message.body, UserMessage) - await self._on_message_func(message.body.content, message.body.source) # type: ignore[arg-type] + self._chat_history.append(message.body) # type: ignore[reportargumenttype,arg-type] # Format message history. @@ -117,11 +129,17 @@ async def handle_message(self, message: GroupChatMessage, ctx: MessageContext) - """ system_message = SystemMessage(selector_prompt) completion = await self._model_client.create([system_message], cancellation_token=ctx.cancellation_token) - assert isinstance(completion.content, str) + + assert isinstance( + completion.content, str + ), f"Completion content must be a string, but is: {type(completion.content)}" if completion.content.upper() == "FINISH": - manager_message = f"\n{'-'*80}\n Manager ({id(self)}): I think it's enough iterations on the story! Thanks for collaborating!" - await self._on_message_func(manager_message, "group_chat_manager") + finish_msg = "I think it's enough iterations on the story! Thanks for collaborating!" 
+ manager_message = f"\n{'-'*80}\n Manager ({id(self)}): {finish_msg}" + await publish_message_to_ui( + runtime=self, source=self.id.type, user_message=finish_msg, ui_config=self._ui_config + ) self.console.print(Markdown(manager_message)) return @@ -136,3 +154,62 @@ async def handle_message(self, message: GroupChatMessage, ctx: MessageContext) - await self.publish_message(RequestToSpeak(), DefaultTopicId(type=selected_topic_type)) return raise ValueError(f"Invalid role selected: {completion.content}") + + +class UIAgent(RoutedAgent): + """Handles UI-related tasks and message processing for the distributed group chat system.""" + + def __init__(self, on_message_chunk_func: Callable[[MessageChunk], Awaitable[None]]) -> None: + super().__init__("UI Agent") + self._on_message_chunk_func = on_message_chunk_func + + @message_handler + async def handle_message_chunk(self, message: MessageChunk, ctx: MessageContext) -> None: + await self._on_message_chunk_func(message) + + +async def publish_message_to_ui( + runtime: RoutedAgent | WorkerAgentRuntime, + source: str, + user_message: str, + ui_config: UIAgentConfig, +) -> None: + message_id = str(uuid4()) + # Stream the message to UI + message_chunks = ( + MessageChunk(message_id=message_id, text=token + " ", author=source, finished=False) + for token in user_message.split() + ) + for chunk in message_chunks: + await runtime.publish_message( + chunk, + DefaultTopicId(type=ui_config.topic_type), + ) + await asyncio.sleep(random.uniform(ui_config.min_delay, ui_config.max_delay)) + + await runtime.publish_message( + MessageChunk(message_id=message_id, text=" ", author=source, finished=True), + DefaultTopicId(type=ui_config.topic_type), + ) + + +async def publish_message_to_ui_and_backend( + runtime: RoutedAgent | WorkerAgentRuntime, + source: str, + user_message: str, + ui_config: UIAgentConfig, + group_chat_topic_type: str, +) -> None: + # Publish messages for ui + await publish_message_to_ui( + runtime=runtime, + source=source, + user_message=user_message, + ui_config=ui_config, + ) + + # Publish message to backend + await runtime.publish_message( + GroupChatMessage(body=UserMessage(content=user_message, source=source)), + topic_id=DefaultTopicId(type=group_chat_topic_type), + ) diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_types.py b/python/packages/autogen-core/samples/distributed-group-chat/_types.py index 343c264f1823..0e05d941c1ff 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_types.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_types.py @@ -1,7 +1,10 @@ +from dataclasses import dataclass +from typing import Dict + from autogen_core.components.models import ( LLMMessage, ) -from autogen_core.components.models.config import AzureOpenAIClientConfiguration +from autogen_ext.models import AzureOpenAIClientConfiguration from pydantic import BaseModel @@ -17,6 +20,17 @@ class RequestToSpeak(BaseModel): pass +@dataclass +class MessageChunk: + message_id: str + text: str + author: str + finished: bool + + def __str__(self) -> str: + return f"{self.author}({self.message_id}): {self.text}" + + # Define Host configuration model class HostConfig(BaseModel): hostname: str @@ -40,10 +54,25 @@ class ChatAgentConfig(BaseModel): system_message: str +# Define UI Agent configuration model +class UIAgentConfig(BaseModel): + topic_type: str + artificial_stream_delay_seconds: Dict[str, float] + + @property + def min_delay(self) -> float: + return 
self.artificial_stream_delay_seconds.get("min", 0.0) + + @property + def max_delay(self) -> float: + return self.artificial_stream_delay_seconds.get("max", 0.0) + + # Define the overall AppConfig model class AppConfig(BaseModel): host: HostConfig group_chat_manager: GroupChatManagerConfig writer_agent: ChatAgentConfig editor_agent: ChatAgentConfig - client_config: AzureOpenAIClientConfiguration = None # type: ignore[assignment] # This was required to do custom instantiation in `load_config`` + ui_agent: UIAgentConfig + client_config: AzureOpenAIClientConfiguration = None # type: ignore[assignment] # This was required to do custom instantiation in `load_config` diff --git a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py index 2c4b768e49da..431a94319fc5 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/_utils.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/_utils.py @@ -5,7 +5,7 @@ import yaml from _types import AppConfig from autogen_core.base import MessageSerializer, try_get_known_serializers_for_type -from autogen_core.components.models.config import AzureOpenAIClientConfiguration +from autogen_ext.models import AzureOpenAIClientConfiguration from azure.identity import DefaultAzureCredential, get_bearer_token_provider diff --git a/python/packages/autogen-core/samples/distributed-group-chat/config.yaml b/python/packages/autogen-core/samples/distributed-group-chat/config.yaml index 26eaf5b3b38f..f18b4545500a 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/config.yaml +++ b/python/packages/autogen-core/samples/distributed-group-chat/config.yaml @@ -4,17 +4,23 @@ host: group_chat_manager: topic_type: "group_chat" - max_rounds: 7 + max_rounds: 3 writer_agent: topic_type: "Writer" description: "Writer for creating any text content." - system_message: "You are a one sentence Writer and provide one line content each time" + system_message: "You are a one-sentence Writer and provide one sentence of content each time" editor_agent: topic_type: "Editor" description: "Editor for planning and reviewing the content." - system_message: "You are an Editor. You provide just max 10 words as feedback on writers content." + system_message: "You are an Editor. You provide at most 15 words of feedback on the writer's content." 
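For reference, a minimal sketch (not part of the patch) of how the `UIAgentConfig` model above consumes the `artificial_stream_delay_seconds` mapping supplied by the `ui_agent` config entry just below; the values here simply mirror the sample config:

```python
# Sketch only: UIAgentConfig as defined in _types.py in this PR.
import random

from _types import UIAgentConfig

ui_config = UIAgentConfig(
    topic_type="ui_events",
    artificial_stream_delay_seconds={"min": 0.05, "max": 0.1},
)

# publish_message_to_ui sleeps for a value in [min_delay, max_delay]
# between chunks to simulate streaming:
delay = random.uniform(ui_config.min_delay, ui_config.max_delay)
print(f"sleeping {delay:.3f}s between chunks")
```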
+ +ui_agent: + topic_type: "ui_events" + artificial_stream_delay_seconds: + min: 0.05 + max: 0.1 client_config: model: "gpt-4o" diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run.sh b/python/packages/autogen-core/samples/distributed-group-chat/run.sh index 859096e9402b..d4b8c1b1b6f4 100755 --- a/python/packages/autogen-core/samples/distributed-group-chat/run.sh +++ b/python/packages/autogen-core/samples/distributed-group-chat/run.sh @@ -5,12 +5,14 @@ tmux new-session -d -s distributed_group_chat # # Split the terminal into 2 vertical panes tmux split-window -h -# # Split the left pane horizontally +# # Split the left pane into 3 panes tmux select-pane -t distributed_group_chat:0.0 -tmux split-window -v +tmux split-window -v +tmux select-pane -t distributed_group_chat:0.0 +tmux split-window -v # # Split the right pane horizontally -tmux select-pane -t distributed_group_chat:0.2 +tmux select-pane -t distributed_group_chat:0.3 tmux split-window -v # Select the first pane to start @@ -18,9 +20,10 @@ tmux select-pane -t distributed_group_chat:0.0 # Activate the virtual environment and run the scripts in each pane tmux send-keys -t distributed_group_chat:0.0 "python run_host.py" C-m -tmux send-keys -t distributed_group_chat:0.2 "python run_writer_agent.py" C-m -tmux send-keys -t distributed_group_chat:0.3 "python run_editor_agent.py" C-m -tmux send-keys -t distributed_group_chat:0.1 "chainlit run run_group_chat_manager.py --port 8001" C-m +tmux send-keys -t distributed_group_chat:0.1 "chainlit run run_ui.py --port 8001" C-m +tmux send-keys -t distributed_group_chat:0.3 "python run_writer_agent.py" C-m +tmux send-keys -t distributed_group_chat:0.4 "python run_editor_agent.py" C-m +tmux send-keys -t distributed_group_chat:0.2 "python run_group_chat_manager.py" C-m # # Attach to the session tmux attach-session -t distributed_group_chat diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py index a452db6b9a3e..8a08bfe6b479 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_editor_agent.py @@ -3,13 +3,13 @@ import warnings from _agents import BaseGroupChatAgent -from _types import AppConfig, GroupChatMessage, RequestToSpeak +from _types import AppConfig, GroupChatMessage, MessageChunk, RequestToSpeak from _utils import get_serializers, load_config, set_all_log_levels from autogen_core.application import WorkerAgentRuntime from autogen_core.components import ( TypeSubscription, ) -from autogen_core.components.models._openai_client import AzureOpenAIChatCompletionClient +from autogen_ext.models import AzureOpenAIChatCompletionClient from rich.console import Console from rich.markdown import Markdown @@ -17,7 +17,7 @@ async def main(config: AppConfig): set_all_log_levels(logging.ERROR) editor_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) - editor_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage])) # type: ignore[arg-type] + editor_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(4) Console().print(Markdown("Starting **`Editor Agent`**")) editor_agent_runtime.start() @@ -29,6 +29,7 @@ async def main(config: AppConfig): group_chat_topic_type=config.group_chat_manager.topic_type, 
system_message=config.editor_agent.system_message, model_client=AzureOpenAIChatCompletionClient(**config.client_config), + ui_config=config.ui_agent, ), ) await editor_agent_runtime.add_subscription( diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py index 5af446ab1a94..e0b2880aa99f 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_group_chat_manager.py @@ -2,18 +2,13 @@ import logging import warnings -import chainlit as cl # type: ignore [reportUnknownMemberType] # This dependency is installed through instructions -from _agents import GroupChatManager -from _types import AppConfig, GroupChatMessage, RequestToSpeak +from _agents import GroupChatManager, publish_message_to_ui, publish_message_to_ui_and_backend +from _types import AppConfig, GroupChatMessage, MessageChunk, RequestToSpeak from _utils import get_serializers, load_config, set_all_log_levels from autogen_core.application import WorkerAgentRuntime from autogen_core.components import ( - DefaultTopicId, TypeSubscription, ) -from autogen_core.components.models import ( - UserMessage, -) from autogen_ext.models import AzureOpenAIChatCompletionClient from rich.console import Console from rich.markdown import Markdown @@ -21,22 +16,16 @@ set_all_log_levels(logging.ERROR) -# TODO: This is the simple hack to send messages to the UI, needs to be improved once we get some help in https://github.com/Chainlit/chainlit/issues/1491 -async def send_cl(msg: str, author: str) -> None: - await cl.Message(content=msg, author=author).send() # type: ignore [reportAttributeAccessIssue,reportUnknownMemberType] - - async def main(config: AppConfig): set_all_log_levels(logging.ERROR) group_chat_manager_runtime = WorkerAgentRuntime(host_address=config.host.address) - # Add group chat manager runtime - - group_chat_manager_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage])) # type: ignore[arg-type] + group_chat_manager_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(1) Console().print(Markdown("Starting **`Group Chat Manager`**")) group_chat_manager_runtime.start() set_all_log_levels(logging.ERROR) + group_chat_manager_type = await GroupChatManager.register( group_chat_manager_runtime, "group_chat_manager", @@ -45,7 +34,7 @@ async def main(config: AppConfig): participant_topic_types=[config.writer_agent.topic_type, config.editor_agent.topic_type], participant_descriptions=[config.writer_agent.description, config.editor_agent.description], max_rounds=config.group_chat_manager.max_rounds, - on_message_func=send_cl, + ui_config=config.ui_agent, ), ) @@ -53,36 +42,32 @@ async def main(config: AppConfig): TypeSubscription(topic_type=config.group_chat_manager.topic_type, agent_type=group_chat_manager_type.type) ) - # This is a simple way to make sure first message gets send after all of the agents have joined await asyncio.sleep(5) - user_message: str = "Please write a one line story about the gingerbread in halloween!" 
+ + await publish_message_to_ui( + runtime=group_chat_manager_runtime, + source="System", + user_message="[ **Due to responsible AI considerations for this sample, the group chat manager sends the initiating message on behalf of the user** ]", + ui_config=config.ui_agent, + ) + await asyncio.sleep(3) + + user_message: str = "Please write a short story about the gingerbread in Halloween!" Console().print(f"Simulating User input in group chat topic:\n\t'{user_message}'") - await group_chat_manager_runtime.publish_message( - GroupChatMessage( - body=UserMessage( - content=user_message, - source="User", - ) - ), - DefaultTopicId(type=config.group_chat_manager.topic_type), + + await publish_message_to_ui_and_backend( + runtime=group_chat_manager_runtime, + source="User", + user_message=user_message, + ui_config=config.ui_agent, + group_chat_topic_type=config.group_chat_manager.topic_type, ) await group_chat_manager_runtime.stop_when_signal() Console().print("Manager left the chat!") -@cl.on_chat_start # type: ignore -async def start_chat(): +if __name__ == "__main__": set_all_log_levels(logging.ERROR) warnings.filterwarnings("ignore", category=UserWarning, message="Resolved model mismatch.*") asyncio.run(main(load_config())) - - -# This can be used for debugging, you can run this file using python -# if __name__ == "__main__": -# from chainlit.cli import run_chainlit - -# set_all_log_levels(logging.ERROR) -# run_chainlit( -# __file__, -# ) diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py b/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py new file mode 100644 index 000000000000..aeb24e8d1755 --- /dev/null +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_ui.py @@ -0,0 +1,67 @@ +import asyncio +import logging +import warnings + +import chainlit as cl # type: ignore [reportUnknownMemberType] # This dependency is installed through instructions +from _agents import MessageChunk, UIAgent +from _types import AppConfig, GroupChatMessage, RequestToSpeak +from _utils import get_serializers, load_config, set_all_log_levels +from autogen_core.application import WorkerAgentRuntime +from autogen_core.components import ( + TypeSubscription, +) +from chainlit import Message # type: ignore [reportAttributeAccessIssue] +from rich.console import Console +from rich.markdown import Markdown + +set_all_log_levels(logging.ERROR) + + +message_chunks: dict[str, Message] = {} # type: ignore [reportUnknownVariableType] + + +async def send_cl_stream(msg: MessageChunk) -> None: + if msg.message_id not in message_chunks: + message_chunks[msg.message_id] = Message(content="", author=msg.author) + + if not msg.finished: + await message_chunks[msg.message_id].stream_token(msg.text) # type: ignore [reportUnknownVariableType] + else: + await message_chunks[msg.message_id].stream_token(msg.text) # type: ignore [reportUnknownVariableType] + await message_chunks[msg.message_id].update() # type: ignore [reportUnknownMemberType] + await asyncio.sleep(3) + cl_msg = message_chunks[msg.message_id] # type: ignore [reportUnknownVariableType] + await cl_msg.send() # type: ignore [reportUnknownMemberType] + + +async def main(config: AppConfig): + set_all_log_levels(logging.ERROR) + ui_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) + + ui_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] + + Console().print(Markdown("Starting **`UI Agent`**")) + ui_agent_runtime.start() + 
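As an aside, `send_cl_stream` above keys Chainlit messages by `MessageChunk.message_id` and finalizes them when a chunk arrives with `finished=True`. A framework-agnostic sketch of the same reassembly protocol, assuming only the `MessageChunk` dataclass from `_types.py`:

```python
# Sketch only: reassemble streamed MessageChunks into complete messages.
from _types import MessageChunk

_buffers: dict[str, str] = {}


def collect_chunk(chunk: MessageChunk) -> str | None:
    """Accumulate chunk text by message_id; return the full text once finished."""
    _buffers[chunk.message_id] = _buffers.get(chunk.message_id, "") + chunk.text
    if chunk.finished:
        return _buffers.pop(chunk.message_id)
    return None
```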
set_all_log_levels(logging.ERROR) + + ui_agent_type = await UIAgent.register( + ui_agent_runtime, + "ui_agent", + lambda: UIAgent( + on_message_chunk_func=send_cl_stream, + ), + ) + + await ui_agent_runtime.add_subscription( + TypeSubscription(topic_type=config.ui_agent.topic_type, agent_type=ui_agent_type.type) + ) # TODO: This could be a great example of using agent_id to route to a specific element in the UI. Can replace MessageChunk.message_id + + await ui_agent_runtime.stop_when_signal() + Console().print("UI Agent left the chat!") + + +@cl.on_chat_start # type: ignore +async def start_chat(): + set_all_log_levels(logging.ERROR) + warnings.filterwarnings("ignore", category=UserWarning, message="Resolved model mismatch.*") + asyncio.run(main(load_config())) diff --git a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py index 1c6935de383e..674ed59ce3a8 100644 --- a/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py +++ b/python/packages/autogen-core/samples/distributed-group-chat/run_writer_agent.py @@ -3,7 +3,7 @@ import warnings from _agents import BaseGroupChatAgent -from _types import AppConfig, GroupChatMessage, RequestToSpeak +from _types import AppConfig, GroupChatMessage, MessageChunk, RequestToSpeak from _utils import get_serializers, load_config, set_all_log_levels from autogen_core.application import WorkerAgentRuntime from autogen_core.components import ( @@ -17,7 +17,7 @@ async def main(config: AppConfig) -> None: set_all_log_levels(logging.ERROR) writer_agent_runtime = WorkerAgentRuntime(host_address=config.host.address) - writer_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage])) # type: ignore[arg-type] + writer_agent_runtime.add_message_serializer(get_serializers([RequestToSpeak, GroupChatMessage, MessageChunk])) # type: ignore[arg-type] await asyncio.sleep(3) Console().print(Markdown("Starting **`Writer Agent`**")) @@ -30,6 +30,7 @@ async def main(config: AppConfig) -> None: group_chat_topic_type=config.group_chat_manager.topic_type, system_message=config.writer_agent.system_message, model_client=AzureOpenAIChatCompletionClient(**config.client_config), + ui_config=config.ui_agent, ), ) await writer_agent_runtime.add_subscription( diff --git a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py index f511bd782d87..3d81f15eb330 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/application/_single_threaded_agent_runtime.py @@ -4,6 +4,7 @@ import inspect import logging import threading +import uuid import warnings from asyncio import CancelledError, Future, Task from collections.abc import Sequence @@ -53,6 +54,7 @@ class PublishMessageEnvelope: sender: AgentId | None topic_id: TopicId metadata: EnvelopeMetadata | None = None + message_id: str @dataclass(kw_only=True) @@ -147,6 +149,23 @@ def _stop_when_idle(self) -> bool: return self._run_state == RunContext.RunState.UNTIL_IDLE and self._runtime.idle +def _warn_if_none(value: Any, handler_name: str) -> None: + """ + Utility function to check if the intervention handler returned None and issue a warning. 
+ + Args: + value: The return value to check + handler_name: Name of the intervention handler method for the warning message + """ + if value is None: + warnings.warn( + f"Intervention handler {handler_name} returned None. This might be unintentional. " + "Consider returning the original message or DropMessage explicitly.", + RuntimeWarning, + stacklevel=2, + ) + + class SingleThreadedAgentRuntime(AgentRuntime): def __init__( self, @@ -239,6 +258,7 @@ async def publish_message( *, sender: AgentId | None = None, cancellation_token: CancellationToken | None = None, + message_id: str | None = None, ) -> None: with self._tracer_helper.trace_block( "create", @@ -251,6 +271,9 @@ async def publish_message( content = message.__dict__ if hasattr(message, "__dict__") else message logger.info(f"Publishing message of type {type(message).__name__} to all subscribers: {content}") + if message_id is None: + message_id = str(uuid.uuid4()) + # event_logger.info( # MessageEvent( # payload=message, @@ -268,6 +291,7 @@ async def publish_message( sender=sender, topic_id=topic_id, metadata=get_telemetry_envelope_metadata(), + message_id=message_id, ) ) @@ -310,6 +334,8 @@ async def _process_send(self, message_envelope: SendMessageEnvelope) -> None: topic_id=None, is_rpc=True, cancellation_token=message_envelope.cancellation_token, + # Will be fixed when send API removed + message_id="NOT_DEFINED_TODO_FIX", ) with MessageHandlerContext.populate_context(recipient_agent.id): response = await recipient_agent.on_message( @@ -368,6 +394,7 @@ async def _process_publish(self, message_envelope: PublishMessageEnvelope) -> No topic_id=message_envelope.topic_id, is_rpc=False, cancellation_token=message_envelope.cancellation_token, + message_id=message_envelope.message_id, ) agent = await self._get_agent(agent_id) @@ -433,6 +460,7 @@ async def process_next(self) -> None: ): try: temp_message = await handler.on_send(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_send") except BaseException as e: future.set_exception(e) return @@ -456,6 +484,7 @@ async def process_next(self) -> None: ): try: temp_message = await handler.on_publish(message, sender=sender) + _warn_if_none(temp_message, "on_publish") except BaseException as e: # TODO: we should raise the intervention exception to the publisher. logger.error(f"Exception raised in in intervention handler: {e}", exc_info=True) @@ -474,6 +503,7 @@ async def process_next(self) -> None: for handler in self._intervention_handlers: try: temp_message = await handler.on_response(message, sender=sender, recipient=recipient) + _warn_if_none(temp_message, "on_response") except BaseException as e: # TODO: should we raise the exception to sender of the response instead? future.set_exception(e) diff --git a/python/packages/autogen-core/src/autogen_core/application/_utils.py b/python/packages/autogen-core/src/autogen_core/application/_utils.py new file mode 100644 index 000000000000..10fbfd1b8c8a --- /dev/null +++ b/python/packages/autogen-core/src/autogen_core/application/_utils.py @@ -0,0 +1,3 @@ +GRPC_IMPORT_ERROR_STR = ( + "Distributed runtime features require additional dependencies. 
Install them with: pip install autogen-core[grpc]" +) diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py index ac3e00e4a4a9..0e5fb933a08e 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime.py @@ -3,6 +3,7 @@ import json import logging import signal +import uuid import warnings from asyncio import Future, Task from collections import defaultdict @@ -27,16 +28,11 @@ cast, ) -import grpc -from grpc.aio import StreamStreamCall from opentelemetry.trace import TracerProvider from typing_extensions import Self, deprecated -from autogen_core.base import JSON_DATA_CONTENT_TYPE -from autogen_core.base._serialization import MessageSerializer, SerializationRegistry -from autogen_core.base._type_helpers import ChannelArgumentType - from ..base import ( + JSON_DATA_CONTENT_TYPE, Agent, AgentId, AgentInstantiationContext, @@ -50,11 +46,19 @@ SubscriptionInstantiationContext, TopicId, ) -from ..components import TypeSubscription +from ..base._serialization import MessageSerializer, SerializationRegistry +from ..base._type_helpers import ChannelArgumentType +from ..components import TypePrefixSubscription, TypeSubscription from ._helpers import SubscriptionManager, get_impl +from ._utils import GRPC_IMPORT_ERROR_STR from .protos import agent_worker_pb2, agent_worker_pb2_grpc from .telemetry import MessageRuntimeTracingConfig, TraceHelper, get_telemetry_grpc_metadata +try: + import grpc.aio +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e + if TYPE_CHECKING: from .protos.agent_worker_pb2_grpc import AgentRpcAsyncStub @@ -140,6 +144,8 @@ async def _connect( # type: ignore ) -> None: stub: AgentRpcAsyncStub = agent_worker_pb2_grpc.AgentRpcStub(channel) # type: ignore + from grpc.aio import StreamStreamCall + # TODO: where do exceptions from reading the iterable go? How do we recover from those? recv_stream: StreamStreamCall[agent_worker_pb2.Message, agent_worker_pb2.Message] = stub.OpenChannel( # type: ignore QueueAsyncIterable(send_queue) @@ -366,11 +372,17 @@ async def publish_message( *, sender: AgentId | None = None, cancellation_token: CancellationToken | None = None, + message_id: str | None = None, ) -> None: if not self._running: raise ValueError("Runtime must be running when publishing message.") if self._host_connection is None: raise RuntimeError("Host connection is not set.") + if message_id is None: + message_id = str(uuid.uuid4()) + + # TODO: consume message_id + message_type = self._serialization_registry.type_name(message) with self._trace_helper.trace_block( "create", topic_id, parent=None, extraAttributes={"message_type": message_type} @@ -442,6 +454,7 @@ async def _process_request(self, request: agent_worker_pb2.RpcRequest) -> None: topic_id=None, is_rpc=True, cancellation_token=CancellationToken(), + message_id=request.request_id, ) # Call the receiving agent. 
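As an aside, both runtimes now accept an optional `message_id` on `publish_message` and fall back to a generated UUID when it is omitted. A hedged usage sketch, assuming `AgentRuntime` is exported from `autogen_core.base`; the topic type and the untyped `message` argument are placeholders, not part of this patch:

```python
# Sketch only: supply an explicit message_id so downstream handlers
# (e.g. a UI) can correlate related messages.
from typing import Any
from uuid import uuid4

from autogen_core.base import AgentRuntime
from autogen_core.components import DefaultTopicId


async def publish_with_id(runtime: AgentRuntime, message: Any) -> str:
    message_id = str(uuid4())
    await runtime.publish_message(
        message,
        DefaultTopicId(type="ui_events"),
        message_id=message_id,  # omit to let the runtime generate one
    )
    return message_id
```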
@@ -525,11 +538,13 @@ async def _process_event(self, event: agent_worker_pb2.Event) -> None: for agent_id in recipients: if agent_id == sender: continue + # TODO: consume message_id message_context = MessageContext( sender=sender, topic_id=topic_id, is_rpc=False, cancellation_token=CancellationToken(), + message_id="NOT_DEFINED_TODO_FIX", ) agent = await self._get_agent(agent_id) with MessageHandlerContext.populate_context(agent.id): @@ -700,27 +715,44 @@ async def try_get_underlying_agent_instance(self, id: AgentId, type: Type[T] = A async def add_subscription(self, subscription: Subscription) -> None: if self._host_connection is None: raise RuntimeError("Host connection is not set.") - if not isinstance(subscription, TypeSubscription): - raise ValueError("Only TypeSubscription is supported.") - # Add to local subscription manager. - await self._subscription_manager.add_subscription(subscription) # Create a future for the subscription response. future = asyncio.get_event_loop().create_future() request_id = await self._get_new_request_id() + + match subscription: + case TypeSubscription(topic_type=topic_type, agent_type=agent_type): + message = agent_worker_pb2.Message( + addSubscriptionRequest=agent_worker_pb2.AddSubscriptionRequest( + request_id=request_id, + subscription=agent_worker_pb2.Subscription( + typeSubscription=agent_worker_pb2.TypeSubscription( + topic_type=topic_type, agent_type=agent_type + ) + ), + ) + ) + case TypePrefixSubscription(topic_type_prefix=topic_type_prefix, agent_type=agent_type): + message = agent_worker_pb2.Message( + addSubscriptionRequest=agent_worker_pb2.AddSubscriptionRequest( + request_id=request_id, + subscription=agent_worker_pb2.Subscription( + typePrefixSubscription=agent_worker_pb2.TypePrefixSubscription( + topic_type_prefix=topic_type_prefix, agent_type=agent_type + ) + ), + ) + ) + case _: + raise ValueError("Unsupported subscription type.") + + # Add the future to the pending requests. self._pending_requests[request_id] = future + # Add to local subscription manager. + await self._subscription_manager.add_subscription(subscription) + # Send the subscription to the host. - message = agent_worker_pb2.Message( - addSubscriptionRequest=agent_worker_pb2.AddSubscriptionRequest( - request_id=request_id, - subscription=agent_worker_pb2.Subscription( - typeSubscription=agent_worker_pb2.TypeSubscription( - topic_type=subscription.topic_type, agent_type=subscription.agent_type - ) - ), - ) - ) await self._host_connection.send(message) # Wait for the subscription response. 
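As an aside, the `match` statement above maps each concrete subscription class onto its protobuf `oneof` field. A simplified sketch of the same class-pattern dispatch, minus the protobuf plumbing:

```python
# Sketch only: keyword class patterns match on isinstance plus the
# topic_type / topic_type_prefix / agent_type properties these classes expose.
from autogen_core.components import TypePrefixSubscription, TypeSubscription


def describe(subscription) -> str:
    match subscription:
        case TypeSubscription(topic_type=topic_type, agent_type=agent_type):
            return f"typeSubscription({topic_type!r} -> {agent_type!r})"
        case TypePrefixSubscription(topic_type_prefix=prefix, agent_type=agent_type):
            return f"typePrefixSubscription({prefix!r}* -> {agent_type!r})"
        case _:
            raise ValueError("Unsupported subscription type.")


print(describe(TypeSubscription(topic_type="group_chat", agent_type="editor_agent")))
```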
diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py index e6585098bdbd..d7fee07ff1f8 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host.py @@ -3,11 +3,14 @@ import signal from typing import Optional, Sequence -import grpc - -from autogen_core.base._type_helpers import ChannelArgumentType - +from ..base._type_helpers import ChannelArgumentType +from ._utils import GRPC_IMPORT_ERROR_STR from ._worker_runtime_host_servicer import WorkerAgentRuntimeHostServicer + +try: + import grpc +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e from .protos import agent_worker_pb2_grpc logger = logging.getLogger("autogen_core") diff --git a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py index 1ed794c35f29..5cd2bf8ea9b8 100644 --- a/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py +++ b/python/packages/autogen-core/src/autogen_core/application/_worker_runtime_host_servicer.py @@ -4,11 +4,18 @@ from asyncio import Future, Task from typing import Any, Dict, Set -import grpc +from autogen_core.components._type_prefix_subscription import TypePrefixSubscription -from ..base import TopicId +from ..base import Subscription, TopicId from ..components import TypeSubscription from ._helpers import SubscriptionManager +from ._utils import GRPC_IMPORT_ERROR_STR + +try: + import grpc +except ImportError as e: + raise ImportError(GRPC_IMPORT_ERROR_STR) from e + from .protos import agent_worker_pb2, agent_worker_pb2_grpc logger = logging.getLogger("autogen_core") @@ -216,34 +223,46 @@ async def _process_add_subscription_request( self, add_subscription_req: agent_worker_pb2.AddSubscriptionRequest, client_id: int ) -> None: oneofcase = add_subscription_req.subscription.WhichOneof("subscription") + subscription: Subscription | None = None match oneofcase: case "typeSubscription": type_subscription_msg: agent_worker_pb2.TypeSubscription = ( add_subscription_req.subscription.typeSubscription ) - type_subscription = TypeSubscription( + subscription = TypeSubscription( topic_type=type_subscription_msg.topic_type, agent_type=type_subscription_msg.agent_type ) - try: - await self._subscription_manager.add_subscription(type_subscription) - subscription_ids = self._client_id_to_subscription_id_mapping.setdefault(client_id, set()) - subscription_ids.add(type_subscription.id) - success = True - error = None - except ValueError as e: - success = False - error = str(e) - # Send a response back to the client. 
- await self._send_queues[client_id].put( - agent_worker_pb2.Message( - addSubscriptionResponse=agent_worker_pb2.AddSubscriptionResponse( - request_id=add_subscription_req.request_id, success=success, error=error - ) - ) + + case "typePrefixSubscription": + type_prefix_subscription_msg: agent_worker_pb2.TypePrefixSubscription = ( + add_subscription_req.subscription.typePrefixSubscription + ) + subscription = TypePrefixSubscription( + topic_type_prefix=type_prefix_subscription_msg.topic_type_prefix, + agent_type=type_prefix_subscription_msg.agent_type, ) case None: logger.warning("Received empty subscription message") + if subscription is not None: + try: + await self._subscription_manager.add_subscription(subscription) + subscription_ids = self._client_id_to_subscription_id_mapping.setdefault(client_id, set()) + subscription_ids.add(subscription.id) + success = True + error = None + except ValueError as e: + success = False + error = str(e) + # Send a response back to the client. + await self._send_queues[client_id].put( + agent_worker_pb2.Message( + addSubscriptionResponse=agent_worker_pb2.AddSubscriptionResponse( + request_id=add_subscription_req.request_id, success=success, error=error + ) + ) + ) + async def GetState( # type: ignore self, request: agent_worker_pb2.AgentId, diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py index 0637e866c4de..8f143d770aef 100644 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py +++ b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.py @@ -16,7 +16,7 @@ from google.protobuf import any_pb2 as google_dot_protobuf_dot_any__pb2 -DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x61gent_worker.proto\x12\x06\x61gents\x1a\x10\x63loudevent.proto\x1a\x19google/protobuf/any.proto\"\'\n\x07TopicId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\"$\n\x07\x41gentId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"E\n\x07Payload\x12\x11\n\tdata_type\x18\x01 \x01(\t\x12\x19\n\x11\x64\x61ta_content_type\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\x89\x02\n\nRpcRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12$\n\x06source\x18\x02 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12\x1f\n\x06target\x18\x03 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0e\n\x06method\x18\x04 \x01(\t\x12 \n\x07payload\x18\x05 \x01(\x0b\x32\x0f.agents.Payload\x12\x32\n\x08metadata\x18\x06 \x03(\x0b\x32 .agents.RpcRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source\"\xb8\x01\n\x0bRpcResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12 \n\x07payload\x18\x02 \x01(\x0b\x32\x0f.agents.Payload\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x33\n\x08metadata\x18\x04 \x03(\x0b\x32!.agents.RpcResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe4\x01\n\x05\x45vent\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x14\n\x0ctopic_source\x18\x02 \x01(\t\x12$\n\x06source\x18\x03 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12 \n\x07payload\x18\x04 \x01(\x0b\x32\x0f.agents.Payload\x12-\n\x08metadata\x18\x05 \x03(\x0b\x32\x1b.agents.Event.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x42\t\n\x07_source\"<\n\x18RegisterAgentTypeRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"^\n\x19RegisterAgentTypeResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\":\n\x10TypeSubscription\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"T\n\x0cSubscription\x12\x34\n\x10typeSubscription\x18\x01 \x01(\x0b\x32\x18.agents.TypeSubscriptionH\x00\x42\x0e\n\x0csubscription\"X\n\x16\x41\x64\x64SubscriptionRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12*\n\x0csubscription\x18\x02 \x01(\x0b\x32\x14.agents.Subscription\"\\\n\x17\x41\x64\x64SubscriptionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\x9d\x01\n\nAgentState\x12!\n\x08\x61gent_id\x18\x01 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0c\n\x04\x65Tag\x18\x02 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x03 \x01(\x0cH\x00\x12\x13\n\ttext_data\x18\x04 \x01(\tH\x00\x12*\n\nproto_data\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x06\n\x04\x64\x61ta\"j\n\x10GetStateResponse\x12\'\n\x0b\x61gent_state\x18\x01 \x01(\x0b\x32\x12.agents.AgentState\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"B\n\x11SaveStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\x05\x65rror\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\xc6\x03\n\x07Message\x12%\n\x07request\x18\x01 \x01(\x0b\x32\x12.agents.RpcRequestH\x00\x12\'\n\x08response\x18\x02 \x01(\x0b\x32\x13.agents.RpcResponseH\x00\x12\x1e\n\x05\x65vent\x18\x03 \x01(\x0b\x32\r.agents.EventH\x00\x12\x44\n\x18registerAgentTypeRequest\x18\x04 \x01(\x0b\x32 .agents.RegisterAgentTypeRequestH\x00\x12\x46\n\x19registerAgentTypeResponse\x18\x05 \x01(\x0b\x32!.agents.RegisterAgentTypeResponseH\x00\x12@\n\x16\x61\x64\x64SubscriptionRequest\x18\x06 \x01(\x0b\x32\x1e.agents.AddSubscriptionRequestH\x00\x12\x42\n\x17\x61\x64\x64SubscriptionResponse\x18\x07 \x01(\x0b\x32\x1f.agents.AddSubscriptionResponseH\x00\x12,\n\ncloudEvent\x18\x08 \x01(\x0b\x32\x16.cloudevent.CloudEventH\x00\x42\t\n\x07message2\xb2\x01\n\x08\x41gentRpc\x12\x33\n\x0bOpenChannel\x12\x0f.agents.Message\x1a\x0f.agents.Message(\x01\x30\x01\x12\x35\n\x08GetState\x12\x0f.agents.AgentId\x1a\x18.agents.GetStateResponse\x12:\n\tSaveState\x12\x12.agents.AgentState\x1a\x19.agents.SaveStateResponseB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3') +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x12\x61gent_worker.proto\x12\x06\x61gents\x1a\x10\x63loudevent.proto\x1a\x19google/protobuf/any.proto\"\'\n\x07TopicId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0e\n\x06source\x18\x02 \x01(\t\"$\n\x07\x41gentId\x12\x0c\n\x04type\x18\x01 \x01(\t\x12\x0b\n\x03key\x18\x02 \x01(\t\"E\n\x07Payload\x12\x11\n\tdata_type\x18\x01 \x01(\t\x12\x19\n\x11\x64\x61ta_content_type\x18\x02 \x01(\t\x12\x0c\n\x04\x64\x61ta\x18\x03 \x01(\x0c\"\x89\x02\n\nRpcRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12$\n\x06source\x18\x02 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12\x1f\n\x06target\x18\x03 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0e\n\x06method\x18\x04 \x01(\t\x12 \n\x07payload\x18\x05 \x01(\x0b\x32\x0f.agents.Payload\x12\x32\n\x08metadata\x18\x06 \x03(\x0b\x32 .agents.RpcRequest.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 
\x01(\t:\x02\x38\x01\x42\t\n\x07_source\"\xb8\x01\n\x0bRpcResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12 \n\x07payload\x18\x02 \x01(\x0b\x32\x0f.agents.Payload\x12\r\n\x05\x65rror\x18\x03 \x01(\t\x12\x33\n\x08metadata\x18\x04 \x03(\x0b\x32!.agents.RpcResponse.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\"\xe4\x01\n\x05\x45vent\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x14\n\x0ctopic_source\x18\x02 \x01(\t\x12$\n\x06source\x18\x03 \x01(\x0b\x32\x0f.agents.AgentIdH\x00\x88\x01\x01\x12 \n\x07payload\x18\x04 \x01(\x0b\x32\x0f.agents.Payload\x12-\n\x08metadata\x18\x05 \x03(\x0b\x32\x1b.agents.Event.MetadataEntry\x1a/\n\rMetadataEntry\x12\x0b\n\x03key\x18\x01 \x01(\t\x12\r\n\x05value\x18\x02 \x01(\t:\x02\x38\x01\x42\t\n\x07_source\"<\n\x18RegisterAgentTypeRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0c\n\x04type\x18\x02 \x01(\t\"^\n\x19RegisterAgentTypeResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\":\n\x10TypeSubscription\x12\x12\n\ntopic_type\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"G\n\x16TypePrefixSubscription\x12\x19\n\x11topic_type_prefix\x18\x01 \x01(\t\x12\x12\n\nagent_type\x18\x02 \x01(\t\"\x96\x01\n\x0cSubscription\x12\x34\n\x10typeSubscription\x18\x01 \x01(\x0b\x32\x18.agents.TypeSubscriptionH\x00\x12@\n\x16typePrefixSubscription\x18\x02 \x01(\x0b\x32\x1e.agents.TypePrefixSubscriptionH\x00\x42\x0e\n\x0csubscription\"X\n\x16\x41\x64\x64SubscriptionRequest\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12*\n\x0csubscription\x18\x02 \x01(\x0b\x32\x14.agents.Subscription\"\\\n\x17\x41\x64\x64SubscriptionResponse\x12\x12\n\nrequest_id\x18\x01 \x01(\t\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\x9d\x01\n\nAgentState\x12!\n\x08\x61gent_id\x18\x01 \x01(\x0b\x32\x0f.agents.AgentId\x12\x0c\n\x04\x65Tag\x18\x02 \x01(\t\x12\x15\n\x0b\x62inary_data\x18\x03 \x01(\x0cH\x00\x12\x13\n\ttext_data\x18\x04 \x01(\tH\x00\x12*\n\nproto_data\x18\x05 \x01(\x0b\x32\x14.google.protobuf.AnyH\x00\x42\x06\n\x04\x64\x61ta\"j\n\x10GetStateResponse\x12\'\n\x0b\x61gent_state\x18\x01 \x01(\x0b\x32\x12.agents.AgentState\x12\x0f\n\x07success\x18\x02 \x01(\x08\x12\x12\n\x05\x65rror\x18\x03 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"B\n\x11SaveStateResponse\x12\x0f\n\x07success\x18\x01 \x01(\x08\x12\x12\n\x05\x65rror\x18\x02 \x01(\tH\x00\x88\x01\x01\x42\x08\n\x06_error\"\xc6\x03\n\x07Message\x12%\n\x07request\x18\x01 \x01(\x0b\x32\x12.agents.RpcRequestH\x00\x12\'\n\x08response\x18\x02 \x01(\x0b\x32\x13.agents.RpcResponseH\x00\x12\x1e\n\x05\x65vent\x18\x03 \x01(\x0b\x32\r.agents.EventH\x00\x12\x44\n\x18registerAgentTypeRequest\x18\x04 \x01(\x0b\x32 .agents.RegisterAgentTypeRequestH\x00\x12\x46\n\x19registerAgentTypeResponse\x18\x05 \x01(\x0b\x32!.agents.RegisterAgentTypeResponseH\x00\x12@\n\x16\x61\x64\x64SubscriptionRequest\x18\x06 \x01(\x0b\x32\x1e.agents.AddSubscriptionRequestH\x00\x12\x42\n\x17\x61\x64\x64SubscriptionResponse\x18\x07 \x01(\x0b\x32\x1f.agents.AddSubscriptionResponseH\x00\x12,\n\ncloudEvent\x18\x08 
\x01(\x0b\x32\x16.cloudevent.CloudEventH\x00\x42\t\n\x07message2\xb2\x01\n\x08\x41gentRpc\x12\x33\n\x0bOpenChannel\x12\x0f.agents.Message\x1a\x0f.agents.Message(\x01\x30\x01\x12\x35\n\x08GetState\x12\x0f.agents.AgentId\x1a\x18.agents.GetStateResponse\x12:\n\tSaveState\x12\x12.agents.AgentState\x1a\x19.agents.SaveStateResponseB!\xaa\x02\x1eMicrosoft.AutoGen.Abstractionsb\x06proto3') _globals = globals() _builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) @@ -54,20 +54,22 @@ _globals['_REGISTERAGENTTYPERESPONSE']._serialized_end=1067 _globals['_TYPESUBSCRIPTION']._serialized_start=1069 _globals['_TYPESUBSCRIPTION']._serialized_end=1127 - _globals['_SUBSCRIPTION']._serialized_start=1129 - _globals['_SUBSCRIPTION']._serialized_end=1213 - _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_start=1215 - _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_end=1303 - _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_start=1305 - _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_end=1397 - _globals['_AGENTSTATE']._serialized_start=1400 - _globals['_AGENTSTATE']._serialized_end=1557 - _globals['_GETSTATERESPONSE']._serialized_start=1559 - _globals['_GETSTATERESPONSE']._serialized_end=1665 - _globals['_SAVESTATERESPONSE']._serialized_start=1667 - _globals['_SAVESTATERESPONSE']._serialized_end=1733 - _globals['_MESSAGE']._serialized_start=1736 - _globals['_MESSAGE']._serialized_end=2190 - _globals['_AGENTRPC']._serialized_start=2193 - _globals['_AGENTRPC']._serialized_end=2371 + _globals['_TYPEPREFIXSUBSCRIPTION']._serialized_start=1129 + _globals['_TYPEPREFIXSUBSCRIPTION']._serialized_end=1200 + _globals['_SUBSCRIPTION']._serialized_start=1203 + _globals['_SUBSCRIPTION']._serialized_end=1353 + _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_start=1355 + _globals['_ADDSUBSCRIPTIONREQUEST']._serialized_end=1443 + _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_start=1445 + _globals['_ADDSUBSCRIPTIONRESPONSE']._serialized_end=1537 + _globals['_AGENTSTATE']._serialized_start=1540 + _globals['_AGENTSTATE']._serialized_end=1697 + _globals['_GETSTATERESPONSE']._serialized_start=1699 + _globals['_GETSTATERESPONSE']._serialized_end=1805 + _globals['_SAVESTATERESPONSE']._serialized_start=1807 + _globals['_SAVESTATERESPONSE']._serialized_end=1873 + _globals['_MESSAGE']._serialized_start=1876 + _globals['_MESSAGE']._serialized_end=2330 + _globals['_AGENTRPC']._serialized_start=2333 + _globals['_AGENTRPC']._serialized_end=2511 # @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi index 522124ab8891..728bfafcc81a 100644 --- a/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi +++ b/python/packages/autogen-core/src/autogen_core/application/protos/agent_worker_pb2.pyi @@ -273,21 +273,43 @@ class TypeSubscription(google.protobuf.message.Message): global___TypeSubscription = TypeSubscription +@typing.final +class TypePrefixSubscription(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + TOPIC_TYPE_PREFIX_FIELD_NUMBER: builtins.int + AGENT_TYPE_FIELD_NUMBER: builtins.int + topic_type_prefix: builtins.str + agent_type: builtins.str + def __init__( + self, + *, + topic_type_prefix: builtins.str = ..., + agent_type: builtins.str = ..., + ) -> None: ... 
+ def ClearField(self, field_name: typing.Literal["agent_type", b"agent_type", "topic_type_prefix", b"topic_type_prefix"]) -> None: ... + +global___TypePrefixSubscription = TypePrefixSubscription + @typing.final class Subscription(google.protobuf.message.Message): DESCRIPTOR: google.protobuf.descriptor.Descriptor TYPESUBSCRIPTION_FIELD_NUMBER: builtins.int + TYPEPREFIXSUBSCRIPTION_FIELD_NUMBER: builtins.int @property def typeSubscription(self) -> global___TypeSubscription: ... + @property + def typePrefixSubscription(self) -> global___TypePrefixSubscription: ... def __init__( self, *, typeSubscription: global___TypeSubscription | None = ..., + typePrefixSubscription: global___TypePrefixSubscription | None = ..., ) -> None: ... - def HasField(self, field_name: typing.Literal["subscription", b"subscription", "typeSubscription", b"typeSubscription"]) -> builtins.bool: ... - def ClearField(self, field_name: typing.Literal["subscription", b"subscription", "typeSubscription", b"typeSubscription"]) -> None: ... - def WhichOneof(self, oneof_group: typing.Literal["subscription", b"subscription"]) -> typing.Literal["typeSubscription"] | None: ... + def HasField(self, field_name: typing.Literal["subscription", b"subscription", "typePrefixSubscription", b"typePrefixSubscription", "typeSubscription", b"typeSubscription"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["subscription", b"subscription", "typePrefixSubscription", b"typePrefixSubscription", "typeSubscription", b"typeSubscription"]) -> None: ... + def WhichOneof(self, oneof_group: typing.Literal["subscription", b"subscription"]) -> typing.Literal["typeSubscription", "typePrefixSubscription"] | None: ... global___Subscription = Subscription diff --git a/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py b/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py index defb1de72921..27c37ad9f349 100644 --- a/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py +++ b/python/packages/autogen-core/src/autogen_core/base/_agent_runtime.py @@ -55,6 +55,7 @@ async def publish_message( *, sender: AgentId | None = None, cancellation_token: CancellationToken | None = None, + message_id: str | None = None, ) -> None: """Publish a message to all agents in the given namespace, or if no namespace is provided, the namespace of the sender. @@ -64,7 +65,8 @@ message (Any): The message to publish. topic (TopicId): The topic to publish the message to. sender (AgentId | None, optional): The agent which sent the message. Defaults to None. - cancellation_token (CancellationToken | None, optional): Token used to cancel an in progress . Defaults to None. + cancellation_token (CancellationToken | None, optional): Token used to cancel an in-progress operation. Defaults to None. + message_id (str | None, optional): The message id. If None, a new message id will be generated. Defaults to None. This message id must be unique, and it is recommended to be a UUID. Raises: UndeliverableException: If the message cannot be delivered. @@ -89,19 +91,6 @@ async def register( agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. subscriptions (Callable[[], list[Subscription]] | list[Subscription] | None, optional): The subscriptions that the agent should be subscribed to. Defaults to None. - Example: - .. 
code-block:: python - - runtime.register( - "chat_agent", - lambda: ChatCompletionAgent( - description="A generic chat agent.", - system_messages=[SystemMessage("You are a helpful assistant")], - model_client=OpenAIChatCompletionClient(model="gpt-4o"), - memory=BufferedChatMemory(buffer_size=10), - ), - ) - """ ... @@ -117,20 +106,6 @@ async def register_factory( Args: type (str): The type of agent this factory creates. It is not the same as agent class name. The `type` parameter is used to differentiate between different factory functions rather than agent classes. agent_factory (Callable[[], T]): The factory that creates the agent, where T is a concrete Agent type. Inside the factory, use `autogen_core.base.AgentInstantiationContext` to access variables like the current runtime and agent ID. - - Example: - .. code-block:: python - - runtime.register( - "chat_agent", - lambda: ChatCompletionAgent( - description="A generic chat agent.", - system_messages=[SystemMessage("You are a helpful assistant")], - model_client=OpenAIChatCompletionClient(model="gpt-4o"), - memory=BufferedChatMemory(buffer_size=10), - ), - ) - """ ... diff --git a/python/packages/autogen-core/src/autogen_core/base/_message_context.py b/python/packages/autogen-core/src/autogen_core/base/_message_context.py index 0a2c2973bc01..c5c00559ed0e 100644 --- a/python/packages/autogen-core/src/autogen_core/base/_message_context.py +++ b/python/packages/autogen-core/src/autogen_core/base/_message_context.py @@ -11,3 +11,4 @@ class MessageContext: topic_id: TopicId | None is_rpc: bool cancellation_token: CancellationToken + message_id: str diff --git a/python/packages/autogen-core/src/autogen_core/base/_serialization.py b/python/packages/autogen-core/src/autogen_core/base/_serialization.py index 5c8e8cc772b0..51fd531feac5 100644 --- a/python/packages/autogen-core/src/autogen_core/base/_serialization.py +++ b/python/packages/autogen-core/src/autogen_core/base/_serialization.py @@ -195,7 +195,7 @@ def try_get_known_serializers_for_type(cls: type[Any]) -> list[MessageSerializer serializers: List[MessageSerializer[Any]] = [] if issubclass(cls, BaseModel): serializers.append(PydanticJsonMessageSerializer(cls)) - elif isinstance(cls, IsDataclass): + elif is_dataclass(cls): serializers.append(DataclassJsonMessageSerializer(cls)) elif issubclass(cls, Message): serializers.append(ProtobufMessageSerializer(cls)) diff --git a/python/packages/autogen-core/src/autogen_core/base/intervention.py b/python/packages/autogen-core/src/autogen_core/base/intervention.py index c9600ac9e13c..3b771c931a33 100644 --- a/python/packages/autogen-core/src/autogen_core/base/intervention.py +++ b/python/packages/autogen-core/src/autogen_core/base/intervention.py @@ -18,6 +18,11 @@ class DropMessage: ... class InterventionHandler(Protocol): + """An intervention handler is a class that can be used to modify, log or drop messages that are being processed by the :class:`autogen_core.base.AgentRuntime`. + + Note: Returning None from any of the intervention handler methods will result in a warning being issued and treated as "no change". If you intend to drop a message, you should return :class:`DropMessage` explicitly. + """ + async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: ... async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]: ... 
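As an aside, a hedged sketch (not part of the patch) of implementing this protocol by subclassing `DefaultInterventionHandler`, returning `DropMessage` explicitly instead of `None` so the new `_warn_if_none` check stays quiet:

```python
# Sketch only: drop published messages from one agent type, pass the rest through.
from typing import Any

from autogen_core.base import AgentId
from autogen_core.base.intervention import DefaultInterventionHandler, DropMessage


class BlockSenderHandler(DefaultInterventionHandler):
    def __init__(self, blocked_agent_type: str) -> None:
        self._blocked = blocked_agent_type

    async def on_publish(self, message: Any, *, sender: AgentId | None) -> Any | type[DropMessage]:
        if sender is not None and sender.type == self._blocked:
            return DropMessage  # drop explicitly; never return None
        return message
```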
async def on_response( @@ -26,6 +31,10 @@ async def on_response( class DefaultInterventionHandler(InterventionHandler): + """Simple class that provides a default implementation for all intervention + handler methods, which simply return the message unchanged. This allows for easy + subclassing to override only the desired methods.""" + async def on_send(self, message: Any, *, sender: AgentId | None, recipient: AgentId) -> Any | type[DropMessage]: return message diff --git a/python/packages/autogen-core/src/autogen_core/components/__init__.py b/python/packages/autogen-core/src/autogen_core/components/__init__.py index 9ad8bdb35a19..4c4d02f3be25 100644 --- a/python/packages/autogen-core/src/autogen_core/components/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/components/__init__.py @@ -7,6 +7,7 @@ from ._default_topic import DefaultTopicId from ._image import Image from ._routed_agent import RoutedAgent, TypeRoutedAgent, event, message_handler, rpc +from ._type_prefix_subscription import TypePrefixSubscription from ._type_subscription import TypeSubscription from ._types import FunctionCall @@ -24,4 +25,5 @@ "DefaultTopicId", "default_subscription", "type_subscription", + "TypePrefixSubscription", ] diff --git a/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py b/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py index aea5c381692d..3e1c0c4301cd 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py +++ b/python/packages/autogen-core/src/autogen_core/components/_default_subscription.py @@ -10,12 +10,6 @@ class DefaultSubscription(TypeSubscription): This topic by default uses the "default" topic type and attempts to detect the agent type to use based on the instantiation context. - Example: - - .. code-block:: python - - await runtime.register("MyAgent", agent_factory, lambda: [DefaultSubscription()]) - Args: topic_type (str, optional): The topic type to subscribe to. Defaults to "default". agent_type (str, optional): The agent type to use for the subscription. Defaults to None, in which case it will attempt to detect the agent type based on the instantiation context. diff --git a/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py b/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py index 9c21670d0605..e7f266bf49d6 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py +++ b/python/packages/autogen-core/src/autogen_core/components/_routed_agent.py @@ -422,9 +422,24 @@ class RoutedAgent(BaseAgent): .. code-block:: python + from dataclasses import dataclass from autogen_core.base import MessageContext from autogen_core.components import RoutedAgent, event, rpc - # Assume Message, MessageWithContent, and Response are defined elsewhere. 
+ + + @dataclass + class Message: + pass + + + @dataclass + class MessageWithContent: + content: str + + + @dataclass + class Response: + pass class MyAgent(RoutedAgent): @@ -433,9 +448,10 @@ def __init__(self): @event async def handle_event_message(self, message: Message, ctx: MessageContext) -> None: - self.publish_message(MessageWithContent("event handled"), ctx.topic_id) + assert ctx.topic_id is not None + await self.publish_message(MessageWithContent("event handled"), ctx.topic_id) - @rpc(match=lambda message, ctx: message.content == "special") + @rpc(match=lambda message, ctx: message.content == "special") # type: ignore async def handle_special_rpc_message(self, message: MessageWithContent, ctx: MessageContext) -> Response: return Response() """ diff --git a/python/packages/autogen-core/src/autogen_core/components/_type_prefix_subscription.py b/python/packages/autogen-core/src/autogen_core/components/_type_prefix_subscription.py new file mode 100644 index 000000000000..d71b587d1418 --- /dev/null +++ b/python/packages/autogen-core/src/autogen_core/components/_type_prefix_subscription.py @@ -0,0 +1,63 @@ +import uuid + +from ..base import AgentId, Subscription, TopicId +from ..base.exceptions import CantHandleException + + +class TypePrefixSubscription(Subscription): + """This subscription matches on topics based on a prefix of the type and maps to agents using the source of the topic as the agent key. + + This subscription causes each source to have its own agent instance. + + Example: + + .. code-block:: python + + from autogen_core.components import TypePrefixSubscription + + subscription = TypePrefixSubscription(topic_type_prefix="t1", agent_type="a1") + + In this case: + + - A topic_id with type `t1` and source `s1` will be handled by an agent of type `a1` with key `s1` + - A topic_id with type `t1` and source `s2` will be handled by an agent of type `a1` with key `s2`. + - A topic_id with type `t1SUFFIX` and source `s2` will be handled by an agent of type `a1` with key `s2`. 
+ + Args: + topic_type_prefix (str): Topic type prefix to match against + agent_type (str): Agent type to handle this subscription + """ + + def __init__(self, topic_type_prefix: str, agent_type: str): + self._topic_type_prefix = topic_type_prefix + self._agent_type = agent_type + self._id = str(uuid.uuid4()) + + @property + def id(self) -> str: + return self._id + + @property + def topic_type_prefix(self) -> str: + return self._topic_type_prefix + + @property + def agent_type(self) -> str: + return self._agent_type + + def is_match(self, topic_id: TopicId) -> bool: + return topic_id.type.startswith(self._topic_type_prefix) + + def map_to_agent(self, topic_id: TopicId) -> AgentId: + if not self.is_match(topic_id): + raise CantHandleException("TopicId does not match the subscription") + + return AgentId(type=self._agent_type, key=topic_id.source) + + def __eq__(self, other: object) -> bool: + if not isinstance(other, TypePrefixSubscription): + return False + + return self.id == other.id or ( + self.agent_type == other.agent_type and self.topic_type_prefix == other.topic_type_prefix + ) diff --git a/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py b/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py index d212317566f5..94def76595d5 100644 --- a/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py +++ b/python/packages/autogen-core/src/autogen_core/components/_type_subscription.py @@ -1,7 +1,6 @@ import uuid -from typing import TypeVar -from ..base import AgentId, BaseAgent, Subscription, TopicId +from ..base import AgentId, Subscription, TopicId from ..base.exceptions import CantHandleException @@ -14,6 +13,8 @@ class TypeSubscription(Subscription): .. code-block:: python + from autogen_core.components import TypeSubscription + subscription = TypeSubscription(topic_type="t1", agent_type="a1") In this case: @@ -57,6 +58,3 @@ def __eq__(self, other: object) -> bool: return False return self.id == other.id or (self.agent_type == other.agent_type and self.topic_type == other.topic_type) - - -BaseAgentType = TypeVar("BaseAgentType", bound="BaseAgent") diff --git a/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py b/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py index deca8355fbb3..31779f65679f 100644 --- a/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py +++ b/python/packages/autogen-core/src/autogen_core/components/code_executor/_impl/local_commandline_code_executor.py @@ -67,25 +67,31 @@ class LocalCommandLineCodeExecutor(CodeExecutor): import venv from pathlib import Path + import asyncio from autogen_core.base import CancellationToken from autogen_core.components.code_executor import CodeBlock, LocalCommandLineCodeExecutor - work_dir = Path("coding") - work_dir.mkdir(exist_ok=True) - venv_dir = work_dir / ".venv" - venv_builder = venv.EnvBuilder(with_pip=True) - venv_builder.create(venv_dir) - venv_context = venv_builder.ensure_directories(venv_dir) + async def example(): + work_dir = Path("coding") + work_dir.mkdir(exist_ok=True) - local_executor = LocalCommandLineCodeExecutor(work_dir=work_dir, virtual_env_context=venv_context) - await local_executor.execute_code_blocks( - code_blocks=[ - CodeBlock(language="bash", code="pip install matplotlib"), - ], - cancellation_token=CancellationToken(), - ) + venv_dir = work_dir / 
".venv" + venv_builder = venv.EnvBuilder(with_pip=True) + venv_builder.create(venv_dir) + venv_context = venv_builder.ensure_directories(venv_dir) + + local_executor = LocalCommandLineCodeExecutor(work_dir=work_dir, virtual_env_context=venv_context) + await local_executor.execute_code_blocks( + code_blocks=[ + CodeBlock(language="bash", code="pip install matplotlib"), + ], + cancellation_token=CancellationToken(), + ) + + + asyncio.run(example()) """ diff --git a/python/packages/autogen-core/src/autogen_core/components/models/__init__.py b/python/packages/autogen-core/src/autogen_core/components/models/__init__.py index f57c82289ddc..9b12aa702edd 100644 --- a/python/packages/autogen-core/src/autogen_core/components/models/__init__.py +++ b/python/packages/autogen-core/src/autogen_core/components/models/__init__.py @@ -1,7 +1,3 @@ -import importlib -import warnings -from typing import TYPE_CHECKING, Any - from ._model_client import ChatCompletionClient, ModelCapabilities from ._types import ( AssistantMessage, @@ -17,13 +13,7 @@ UserMessage, ) -if TYPE_CHECKING: - from ._openai_client import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient - - __all__ = [ - "AzureOpenAIChatCompletionClient", - "OpenAIChatCompletionClient", "ModelCapabilities", "ChatCompletionClient", "SystemMessage", @@ -38,23 +28,3 @@ "TopLogprob", "ChatCompletionTokenLogprob", ] - - -def __getattr__(name: str) -> Any: - deprecated_classes = { - "AzureOpenAIChatCompletionClient": "autogen_ext.models.AzureOpenAIChatCompletionClient", - "OpenAIChatCompletionClient": "autogen_ext.modelsChatCompletionClient", - } - if name in deprecated_classes: - warnings.warn( - f"{name} moved to autogen_ext. " f"Please import it from {deprecated_classes[name]}.", - FutureWarning, - stacklevel=2, - ) - # Dynamically import the class from the current module - module = importlib.import_module("._openai_client", __name__) - attr = getattr(module, name) - # Cache the attribute in the module's global namespace - globals()[name] = attr - return attr - raise AttributeError(f"module {__name__} has no attribute {name}") diff --git a/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py b/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py deleted file mode 100644 index 2440d5b18682..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/_model_info.py +++ /dev/null @@ -1,122 +0,0 @@ -from typing import Dict - -from ._model_client import ModelCapabilities - -# Based on: https://platform.openai.com/docs/models/continuous-model-upgrades -# This is a moving target, so correctness is checked by the model value returned by openai against expected values at runtime`` -_MODEL_POINTERS = { - "gpt-4o": "gpt-4o-2024-08-06", - "gpt-4o-mini": "gpt-4o-mini-2024-07-18", - "gpt-4-turbo": "gpt-4-turbo-2024-04-09", - "gpt-4-turbo-preview": "gpt-4-0125-preview", - "gpt-4": "gpt-4-0613", - "gpt-4-32k": "gpt-4-32k-0613", - "gpt-3.5-turbo": "gpt-3.5-turbo-0125", - "gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613", -} - -_MODEL_CAPABILITIES: Dict[str, ModelCapabilities] = { - "gpt-4o-2024-08-06": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4o-2024-05-13": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4o-mini-2024-07-18": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - "gpt-4-turbo-2024-04-09": { - "vision": True, - "function_calling": True, - "json_output": True, - }, - 
"gpt-4-0125-preview": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-1106-preview": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-1106-vision-preview": { - "vision": True, - "function_calling": False, - "json_output": False, - }, - "gpt-4-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-4-32k-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-0125": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-1106": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-instruct": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, - "gpt-3.5-turbo-16k-0613": { - "vision": False, - "function_calling": True, - "json_output": True, - }, -} - -_MODEL_TOKEN_LIMITS: Dict[str, int] = { - "gpt-4o-2024-08-06": 128000, - "gpt-4o-2024-05-13": 128000, - "gpt-4o-mini-2024-07-18": 128000, - "gpt-4-turbo-2024-04-09": 128000, - "gpt-4-0125-preview": 128000, - "gpt-4-1106-preview": 128000, - "gpt-4-1106-vision-preview": 128000, - "gpt-4-0613": 8192, - "gpt-4-32k-0613": 32768, - "gpt-3.5-turbo-0125": 16385, - "gpt-3.5-turbo-1106": 16385, - "gpt-3.5-turbo-instruct": 4096, - "gpt-3.5-turbo-0613": 4096, - "gpt-3.5-turbo-16k-0613": 16385, -} - - -def resolve_model(model: str) -> str: - if model in _MODEL_POINTERS: - return _MODEL_POINTERS[model] - return model - - -def get_capabilities(model: str) -> ModelCapabilities: - resolved_model = resolve_model(model) - return _MODEL_CAPABILITIES[resolved_model] - - -def get_token_limit(model: str) -> int: - resolved_model = resolve_model(model) - return _MODEL_TOKEN_LIMITS[resolved_model] diff --git a/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py b/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py deleted file mode 100644 index 8ce8ddff2cbc..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/_openai_client.py +++ /dev/null @@ -1,901 +0,0 @@ -import asyncio -import inspect -import json -import logging -import math -import re -import warnings -from asyncio import Task -from typing import ( - Any, - AsyncGenerator, - Dict, - List, - Mapping, - Optional, - Sequence, - Set, - Type, - Union, - cast, -) - -import tiktoken -from openai import AsyncAzureOpenAI, AsyncOpenAI -from openai.types.chat import ( - ChatCompletion, - ChatCompletionAssistantMessageParam, - ChatCompletionContentPartParam, - ChatCompletionContentPartTextParam, - ChatCompletionMessageParam, - ChatCompletionMessageToolCallParam, - ChatCompletionRole, - ChatCompletionSystemMessageParam, - ChatCompletionToolMessageParam, - ChatCompletionToolParam, - ChatCompletionUserMessageParam, - ParsedChatCompletion, - ParsedChoice, - completion_create_params, -) -from openai.types.chat.chat_completion import Choice -from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice -from openai.types.shared_params import FunctionDefinition, FunctionParameters -from pydantic import BaseModel -from typing_extensions import Unpack - -from ...application.logging import EVENT_LOGGER_NAME, TRACE_LOGGER_NAME -from ...application.logging.events import LLMCallEvent -from ...base import CancellationToken -from .. 
import ( - FunctionCall, - Image, -) -from ..tools import Tool, ToolSchema -from . import _model_info -from ._model_client import ChatCompletionClient, ModelCapabilities -from ._types import ( - AssistantMessage, - ChatCompletionTokenLogprob, - CreateResult, - FunctionExecutionResultMessage, - LLMMessage, - RequestUsage, - SystemMessage, - TopLogprob, - UserMessage, -) -from .config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration - -logger = logging.getLogger(EVENT_LOGGER_NAME) -trace_logger = logging.getLogger(TRACE_LOGGER_NAME) - -openai_init_kwargs = set(inspect.getfullargspec(AsyncOpenAI.__init__).kwonlyargs) -aopenai_init_kwargs = set(inspect.getfullargspec(AsyncAzureOpenAI.__init__).kwonlyargs) - -create_kwargs = set(completion_create_params.CompletionCreateParamsBase.__annotations__.keys()) | set( - ("timeout", "stream") -) -# Only single choice allowed -disallowed_create_args = set(["stream", "messages", "function_call", "functions", "n"]) -required_create_args: Set[str] = set(["model"]) - - -def _azure_openai_client_from_config(config: Mapping[str, Any]) -> AsyncAzureOpenAI: - # Take a copy - copied_config = dict(config).copy() - - # Do some fixups - copied_config["azure_deployment"] = copied_config.get("azure_deployment", config.get("model")) - if copied_config["azure_deployment"] is not None: - copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") - copied_config["azure_endpoint"] = copied_config.get("azure_endpoint", copied_config.pop("base_url", None)) - - # Shave down the config to just the AzureOpenAIChatCompletionClient kwargs - azure_config = {k: v for k, v in copied_config.items() if k in aopenai_init_kwargs} - return AsyncAzureOpenAI(**azure_config) - - -def _openai_client_from_config(config: Mapping[str, Any]) -> AsyncOpenAI: - # Shave down the config to just the OpenAI kwargs - openai_config = {k: v for k, v in config.items() if k in openai_init_kwargs} - return AsyncOpenAI(**openai_config) - - -def _create_args_from_config(config: Mapping[str, Any]) -> Dict[str, Any]: - create_args = {k: v for k, v in config.items() if k in create_kwargs} - create_args_keys = set(create_args.keys()) - if not required_create_args.issubset(create_args_keys): - raise ValueError(f"Required create args are missing: {required_create_args - create_args_keys}") - if disallowed_create_args.intersection(create_args_keys): - raise ValueError(f"Disallowed create args are present: {disallowed_create_args.intersection(create_args_keys)}") - return create_args - - -# TODO check types -# oai_system_message_schema = type2schema(ChatCompletionSystemMessageParam) -# oai_user_message_schema = type2schema(ChatCompletionUserMessageParam) -# oai_assistant_message_schema = type2schema(ChatCompletionAssistantMessageParam) -# oai_tool_message_schema = type2schema(ChatCompletionToolMessageParam) - - -def type_to_role(message: LLMMessage) -> ChatCompletionRole: - if isinstance(message, SystemMessage): - return "system" - elif isinstance(message, UserMessage): - return "user" - elif isinstance(message, AssistantMessage): - return "assistant" - else: - return "tool" - - -def user_message_to_oai(message: UserMessage) -> ChatCompletionUserMessageParam: - assert_valid_name(message.source) - if isinstance(message.content, str): - return ChatCompletionUserMessageParam( - content=message.content, - role="user", - name=message.source, - ) - else: - parts: List[ChatCompletionContentPartParam] = [] - for part in message.content: - if isinstance(part, str): - oai_part = 
ChatCompletionContentPartTextParam( - text=part, - type="text", - ) - parts.append(oai_part) - elif isinstance(part, Image): - # TODO: support url based images - # TODO: support specifying details - parts.append(part.to_openai_format()) - else: - raise ValueError(f"Unknown content type: {part}") - return ChatCompletionUserMessageParam( - content=parts, - role="user", - name=message.source, - ) - - -def system_message_to_oai(message: SystemMessage) -> ChatCompletionSystemMessageParam: - return ChatCompletionSystemMessageParam( - content=message.content, - role="system", - ) - - -def func_call_to_oai(message: FunctionCall) -> ChatCompletionMessageToolCallParam: - return ChatCompletionMessageToolCallParam( - id=message.id, - function={ - "arguments": message.arguments, - "name": message.name, - }, - type="function", - ) - - -def tool_message_to_oai( - message: FunctionExecutionResultMessage, -) -> Sequence[ChatCompletionToolMessageParam]: - return [ - ChatCompletionToolMessageParam(content=x.content, role="tool", tool_call_id=x.call_id) for x in message.content - ] - - -def assistant_message_to_oai( - message: AssistantMessage, -) -> ChatCompletionAssistantMessageParam: - assert_valid_name(message.source) - if isinstance(message.content, list): - return ChatCompletionAssistantMessageParam( - tool_calls=[func_call_to_oai(x) for x in message.content], - role="assistant", - name=message.source, - ) - else: - return ChatCompletionAssistantMessageParam( - content=message.content, - role="assistant", - name=message.source, - ) - - -def to_oai_type(message: LLMMessage) -> Sequence[ChatCompletionMessageParam]: - if isinstance(message, SystemMessage): - return [system_message_to_oai(message)] - elif isinstance(message, UserMessage): - return [user_message_to_oai(message)] - elif isinstance(message, AssistantMessage): - return [assistant_message_to_oai(message)] - else: - return tool_message_to_oai(message) - - -def calculate_vision_tokens(image: Image, detail: str = "auto") -> int: - MAX_LONG_EDGE = 2048 - BASE_TOKEN_COUNT = 85 - TOKENS_PER_TILE = 170 - MAX_SHORT_EDGE = 768 - TILE_SIZE = 512 - - if detail == "low": - return BASE_TOKEN_COUNT - - width, height = image.image.size - - # Scale down to fit within a MAX_LONG_EDGE x MAX_LONG_EDGE square if necessary - - if width > MAX_LONG_EDGE or height > MAX_LONG_EDGE: - aspect_ratio = width / height - if aspect_ratio > 1: - # Width is greater than height - width = MAX_LONG_EDGE - height = int(MAX_LONG_EDGE / aspect_ratio) - else: - # Height is greater than or equal to width - height = MAX_LONG_EDGE - width = int(MAX_LONG_EDGE * aspect_ratio) - - # Resize such that the shortest side is MAX_SHORT_EDGE if both dimensions exceed MAX_SHORT_EDGE - aspect_ratio = width / height - if width > MAX_SHORT_EDGE and height > MAX_SHORT_EDGE: - if aspect_ratio > 1: - # Width is greater than height - height = MAX_SHORT_EDGE - width = int(MAX_SHORT_EDGE * aspect_ratio) - else: - # Height is greater than or equal to width - width = MAX_SHORT_EDGE - height = int(MAX_SHORT_EDGE / aspect_ratio) - - # Calculate the number of tiles based on TILE_SIZE - - tiles_width = math.ceil(width / TILE_SIZE) - tiles_height = math.ceil(height / TILE_SIZE) - total_tiles = tiles_width * tiles_height - # Calculate the total tokens based on the number of tiles and the base token count - - total_tokens = BASE_TOKEN_COUNT + TOKENS_PER_TILE * total_tiles - - return total_tokens - - -def _add_usage(usage1: RequestUsage, usage2: RequestUsage) -> RequestUsage: - return RequestUsage( - 
prompt_tokens=usage1.prompt_tokens + usage2.prompt_tokens, - completion_tokens=usage1.completion_tokens + usage2.completion_tokens, - ) - - -def convert_tools( - tools: Sequence[Tool | ToolSchema], -) -> List[ChatCompletionToolParam]: - result: List[ChatCompletionToolParam] = [] - for tool in tools: - if isinstance(tool, Tool): - tool_schema = tool.schema - else: - assert isinstance(tool, dict) - tool_schema = tool - - result.append( - ChatCompletionToolParam( - type="function", - function=FunctionDefinition( - name=tool_schema["name"], - description=(tool_schema["description"] if "description" in tool_schema else ""), - parameters=( - cast(FunctionParameters, tool_schema["parameters"]) if "parameters" in tool_schema else {} - ), - ), - ) - ) - # Check if all tools have valid names. - for tool_param in result: - assert_valid_name(tool_param["function"]["name"]) - return result - - -def normalize_name(name: str) -> str: - """ - LLMs sometimes ask functions while ignoring their own format requirements, this function should be used to replace invalid characters with "_". - - Prefer _assert_valid_name for validating user configuration or input - """ - return re.sub(r"[^a-zA-Z0-9_-]", "_", name)[:64] - - -def assert_valid_name(name: str) -> str: - """ - Ensure that configured names are valid, raises ValueError if not. - - For munging LLM responses use _normalize_name to ensure LLM specified names don't break the API. - """ - if not re.match(r"^[a-zA-Z0-9_-]+$", name): - raise ValueError(f"Invalid name: {name}. Only letters, numbers, '_' and '-' are allowed.") - if len(name) > 64: - raise ValueError(f"Invalid name: {name}. Name must be less than 64 characters.") - return name - - -class BaseOpenAIChatCompletionClient(ChatCompletionClient): - def __init__( - self, - client: Union[AsyncOpenAI, AsyncAzureOpenAI], - create_args: Dict[str, Any], - model_capabilities: Optional[ModelCapabilities] = None, - ): - self._client = client - if model_capabilities is None and isinstance(client, AsyncAzureOpenAI): - raise ValueError("AzureOpenAIChatCompletionClient requires explicit model capabilities") - elif model_capabilities is None: - self._model_capabilities = _model_info.get_capabilities(create_args["model"]) - else: - self._model_capabilities = model_capabilities - - self._resolved_model: Optional[str] = None - if "model" in create_args: - self._resolved_model = _model_info.resolve_model(create_args["model"]) - - if ( - "response_format" in create_args - and create_args["response_format"]["type"] == "json_object" - and not self._model_capabilities["json_output"] - ): - raise ValueError("Model does not support JSON output") - - self._create_args = create_args - self._total_usage = RequestUsage(prompt_tokens=0, completion_tokens=0) - self._actual_usage = RequestUsage(prompt_tokens=0, completion_tokens=0) - - @classmethod - def create_from_config(cls, config: Dict[str, Any]) -> ChatCompletionClient: - return OpenAIChatCompletionClient(**config) - - async def create( - self, - messages: Sequence[LLMMessage], - tools: Sequence[Tool | ToolSchema] = [], - json_output: Optional[bool] = None, - extra_create_args: Mapping[str, Any] = {}, - cancellation_token: Optional[CancellationToken] = None, - ) -> CreateResult: - # Make sure all extra_create_args are valid - extra_create_args_keys = set(extra_create_args.keys()) - if not create_kwargs.issuperset(extra_create_args_keys): - raise ValueError(f"Extra create args are invalid: {extra_create_args_keys - create_kwargs}") - - # Copy the create args and overwrite 
anything in extra_create_args - create_args = self._create_args.copy() - create_args.update(extra_create_args) - - # Declare use_beta_client - use_beta_client: bool = False - response_format_value: Optional[Type[BaseModel]] = None - - if "response_format" in create_args: - value = create_args["response_format"] - # If value is a Pydantic model class, use the beta client - if isinstance(value, type) and issubclass(value, BaseModel): - response_format_value = value - use_beta_client = True - else: - # response_format_value is not a Pydantic model class - use_beta_client = False - response_format_value = None - - # Remove 'response_format' from create_args to prevent passing it twice - create_args_no_response_format = {k: v for k, v in create_args.items() if k != "response_format"} - - # TODO: allow custom handling. - # For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: - for message in messages: - if isinstance(message, UserMessage): - if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): - raise ValueError("Model does not support vision and image was provided") - - if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - if json_output is True: - create_args["response_format"] = {"type": "json_object"} - else: - create_args["response_format"] = {"type": "text"} - - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - oai_messages_nested = [to_oai_type(m) for m in messages] - oai_messages = [item for sublist in oai_messages_nested for item in sublist] - - if self.capabilities["function_calling"] is False and len(tools) > 0: - raise ValueError("Model does not support function calling") - future: Union[Task[ParsedChatCompletion[BaseModel]], Task[ChatCompletion]] - if len(tools) > 0: - converted_tools = convert_tools(tools) - if use_beta_client: - # Pass response_format_value if it's not None - if response_format_value is not None: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - tools=converted_tools, - response_format=response_format_value, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - tools=converted_tools, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=False, - tools=converted_tools, - **create_args, - ) - ) - else: - if use_beta_client: - if response_format_value is not None: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - response_format=response_format_value, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.beta.chat.completions.parse( - messages=oai_messages, - **create_args_no_response_format, - ) - ) - else: - future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=False, - **create_args, - ) - ) - - if cancellation_token is not None: - cancellation_token.link_future(future) - result: Union[ParsedChatCompletion[BaseModel], ChatCompletion] = await future - if use_beta_client: - result = cast(ParsedChatCompletion[Any], result) - - if result.usage is not None: - 
logger.info( - LLMCallEvent( - prompt_tokens=result.usage.prompt_tokens, - completion_tokens=result.usage.completion_tokens, - ) - ) - - usage = RequestUsage( - # TODO backup token counting - prompt_tokens=result.usage.prompt_tokens if result.usage is not None else 0, - completion_tokens=(result.usage.completion_tokens if result.usage is not None else 0), - ) - - if self._resolved_model is not None: - if self._resolved_model != result.model: - warnings.warn( - f"Resolved model mismatch: {self._resolved_model} != {result.model}. Model mapping may be incorrect.", - stacklevel=2, - ) - - # Limited to a single choice currently. - choice: Union[ParsedChoice[Any], ParsedChoice[BaseModel], Choice] = result.choices[0] - if choice.finish_reason == "function_call": - raise ValueError("Function calls are not supported in this context") - - content: Union[str, List[FunctionCall]] - if choice.finish_reason == "tool_calls": - assert choice.message.tool_calls is not None - assert choice.message.function_call is None - - # NOTE: If OAI response type changes, this will need to be updated - content = [ - FunctionCall( - id=x.id, - arguments=x.function.arguments, - name=normalize_name(x.function.name), - ) - for x in choice.message.tool_calls - ] - finish_reason = "function_calls" - else: - finish_reason = choice.finish_reason - content = choice.message.content or "" - logprobs: Optional[List[ChatCompletionTokenLogprob]] = None - if choice.logprobs and choice.logprobs.content: - logprobs = [ - ChatCompletionTokenLogprob( - token=x.token, - logprob=x.logprob, - top_logprobs=[TopLogprob(logprob=y.logprob, bytes=y.bytes) for y in x.top_logprobs], - bytes=x.bytes, - ) - for x in choice.logprobs.content - ] - response = CreateResult( - finish_reason=finish_reason, # type: ignore - content=content, - usage=usage, - cached=False, - logprobs=logprobs, - ) - - _add_usage(self._actual_usage, usage) - _add_usage(self._total_usage, usage) - - # TODO - why is this cast needed? - return response - - async def create_stream( - self, - messages: Sequence[LLMMessage], - tools: Sequence[Tool | ToolSchema] = [], - json_output: Optional[bool] = None, - extra_create_args: Mapping[str, Any] = {}, - cancellation_token: Optional[CancellationToken] = None, - ) -> AsyncGenerator[Union[str, CreateResult], None]: - """ - Creates an AsyncGenerator that will yield a stream of chat completions based on the provided messages and tools. - - Args: - messages (Sequence[LLMMessage]): A sequence of messages to be processed. - tools (Sequence[Tool | ToolSchema], optional): A sequence of tools to be used in the completion. Defaults to `[]`. - json_output (Optional[bool], optional): If True, the output will be in JSON format. Defaults to None. - extra_create_args (Mapping[str, Any], optional): Additional arguments for the creation process. Default to `{}`. - cancellation_token (Optional[CancellationToken], optional): A token to cancel the operation. Defaults to None. - - Yields: - AsyncGenerator[Union[str, CreateResult], None]: A generator yielding the completion results as they are produced. - - In streaming, the default behaviour is not return token usage counts. See: [OpenAI API reference for possible args](https://platform.openai.com/docs/api-reference/chat/create). - However `extra_create_args={"stream_options": {"include_usage": True}}` will (if supported by the accessed API) - return a final chunk with usage set to a RequestUsage object having prompt and completion token counts, - all preceding chunks will have usage as None. 
See: [stream_options](https://platform.openai.com/docs/api-reference/chat/create#chat-create-stream_options). - - Other examples of OPENAI supported arguments that can be included in `extra_create_args`: - - `temperature` (float): Controls the randomness of the output. Higher values (e.g., 0.8) make the output more random, while lower values (e.g., 0.2) make it more focused and deterministic. - - `max_tokens` (int): The maximum number of tokens to generate in the completion. - - `top_p` (float): An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. - - `frequency_penalty` (float): A value between -2.0 and 2.0 that penalizes new tokens based on their existing frequency in the text so far, decreasing the likelihood of repeated phrases. - - `presence_penalty` (float): A value between -2.0 and 2.0 that penalizes new tokens based on whether they appear in the text so far, encouraging the model to talk about new topics. - """ - # Make sure all extra_create_args are valid - extra_create_args_keys = set(extra_create_args.keys()) - if not create_kwargs.issuperset(extra_create_args_keys): - raise ValueError(f"Extra create args are invalid: {extra_create_args_keys - create_kwargs}") - - # Copy the create args and overwrite anything in extra_create_args - create_args = self._create_args.copy() - create_args.update(extra_create_args) - - oai_messages_nested = [to_oai_type(m) for m in messages] - oai_messages = [item for sublist in oai_messages_nested for item in sublist] - - # TODO: allow custom handling. - # For now we raise an error if images are present and vision is not supported - if self.capabilities["vision"] is False: - for message in messages: - if isinstance(message, UserMessage): - if isinstance(message.content, list) and any(isinstance(x, Image) for x in message.content): - raise ValueError("Model does not support vision and image was provided") - - if json_output is not None: - if self.capabilities["json_output"] is False and json_output is True: - raise ValueError("Model does not support JSON output") - - if json_output is True: - create_args["response_format"] = {"type": "json_object"} - else: - create_args["response_format"] = {"type": "text"} - - if len(tools) > 0: - converted_tools = convert_tools(tools) - stream_future = asyncio.ensure_future( - self._client.chat.completions.create( - messages=oai_messages, - stream=True, - tools=converted_tools, - **create_args, - ) - ) - else: - stream_future = asyncio.ensure_future( - self._client.chat.completions.create(messages=oai_messages, stream=True, **create_args) - ) - if cancellation_token is not None: - cancellation_token.link_future(stream_future) - stream = await stream_future - choice: Union[ParsedChoice[Any], ParsedChoice[BaseModel], ChunkChoice] = cast(ChunkChoice, None) - chunk = None - stop_reason = None - maybe_model = None - content_deltas: List[str] = [] - full_tool_calls: Dict[int, FunctionCall] = {} - completion_tokens = 0 - logprobs: Optional[List[ChatCompletionTokenLogprob]] = None - while True: - try: - chunk_future = asyncio.ensure_future(anext(stream)) - if cancellation_token is not None: - cancellation_token.link_future(chunk_future) - chunk = await chunk_future - - # to process usage chunk in streaming situations - # add stream_options={"include_usage": True} in the initialization of OpenAIChatCompletionClient(...) 
- # However the different api's - # OPENAI api usage chunk produces no choices so need to check if there is a choice - # liteLLM api usage chunk does produce choices - choice = ( - chunk.choices[0] - if len(chunk.choices) > 0 - else choice - if chunk.usage is not None and stop_reason is not None - else cast(ChunkChoice, None) - ) - - # for liteLLM chunk usage, do the following hack keeping the pervious chunk.stop_reason (if set). - # set the stop_reason for the usage chunk to the prior stop_reason - stop_reason = choice.finish_reason if chunk.usage is None and stop_reason is None else stop_reason - maybe_model = chunk.model - # First try get content - if choice.delta.content is not None: - content_deltas.append(choice.delta.content) - if len(choice.delta.content) > 0: - yield choice.delta.content - continue - - # Otherwise, get tool calls - if choice.delta.tool_calls is not None: - for tool_call_chunk in choice.delta.tool_calls: - idx = tool_call_chunk.index - if idx not in full_tool_calls: - # We ignore the type hint here because we want to fill in type when the delta provides it - full_tool_calls[idx] = FunctionCall(id="", arguments="", name="") - - if tool_call_chunk.id is not None: - full_tool_calls[idx].id += tool_call_chunk.id - - if tool_call_chunk.function is not None: - if tool_call_chunk.function.name is not None: - full_tool_calls[idx].name += tool_call_chunk.function.name - if tool_call_chunk.function.arguments is not None: - full_tool_calls[idx].arguments += tool_call_chunk.function.arguments - if choice.logprobs and choice.logprobs.content: - logprobs = [ - ChatCompletionTokenLogprob( - token=x.token, - logprob=x.logprob, - top_logprobs=[TopLogprob(logprob=y.logprob, bytes=y.bytes) for y in x.top_logprobs], - bytes=x.bytes, - ) - for x in choice.logprobs.content - ] - - except StopAsyncIteration: - break - - model = maybe_model or create_args["model"] - model = model.replace("gpt-35", "gpt-3.5") # hack for Azure API - - if chunk and chunk.usage: - prompt_tokens = chunk.usage.prompt_tokens - else: - prompt_tokens = 0 - - if stop_reason is None: - raise ValueError("No stop reason found") - - content: Union[str, List[FunctionCall]] - if len(content_deltas) > 1: - content = "".join(content_deltas) - if chunk and chunk.usage: - completion_tokens = chunk.usage.completion_tokens - else: - completion_tokens = 0 - else: - completion_tokens = 0 - # TODO: fix assumption that dict values were added in order and actually order by int index - # for tool_call in full_tool_calls.values(): - # # value = json.dumps(tool_call) - # # completion_tokens += count_token(value, model=model) - # completion_tokens += 0 - content = list(full_tool_calls.values()) - - usage = RequestUsage( - prompt_tokens=prompt_tokens, - completion_tokens=completion_tokens, - ) - if stop_reason == "function_call": - raise ValueError("Function calls are not supported in this context") - if stop_reason == "tool_calls": - stop_reason = "function_calls" - - result = CreateResult( - finish_reason=stop_reason, # type: ignore - content=content, - usage=usage, - cached=False, - logprobs=logprobs, - ) - - _add_usage(self._actual_usage, usage) - _add_usage(self._total_usage, usage) - - yield result - - def actual_usage(self) -> RequestUsage: - return self._actual_usage - - def total_usage(self) -> RequestUsage: - return self._total_usage - - def count_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: - model = self._create_args["model"] - try: - encoding = 
tiktoken.encoding_for_model(model) - except KeyError: - trace_logger.warning(f"Model {model} not found. Using cl100k_base encoding.") - encoding = tiktoken.get_encoding("cl100k_base") - tokens_per_message = 3 - tokens_per_name = 1 - num_tokens = 0 - - # Message tokens. - for message in messages: - num_tokens += tokens_per_message - oai_message = to_oai_type(message) - for oai_message_part in oai_message: - for key, value in oai_message_part.items(): - if value is None: - continue - - if isinstance(message, UserMessage) and isinstance(value, list): - typed_message_value = cast(List[ChatCompletionContentPartParam], value) - - assert len(typed_message_value) == len( - message.content - ), "Mismatch in message content and typed message value" - - # We need image properties that are only in the original message - for part, content_part in zip(typed_message_value, message.content, strict=False): - if isinstance(content_part, Image): - # TODO: add detail parameter - num_tokens += calculate_vision_tokens(content_part) - elif isinstance(part, str): - num_tokens += len(encoding.encode(part)) - else: - try: - serialized_part = json.dumps(part) - num_tokens += len(encoding.encode(serialized_part)) - except TypeError: - trace_logger.warning(f"Could not convert {part} to string, skipping.") - else: - if not isinstance(value, str): - try: - value = json.dumps(value) - except TypeError: - trace_logger.warning(f"Could not convert {value} to string, skipping.") - continue - num_tokens += len(encoding.encode(value)) - if key == "name": - num_tokens += tokens_per_name - num_tokens += 3 # every reply is primed with <|start|>assistant<|message|> - - # Tool tokens. - oai_tools = convert_tools(tools) - for tool in oai_tools: - function = tool["function"] - tool_tokens = len(encoding.encode(function["name"])) - if "description" in function: - tool_tokens += len(encoding.encode(function["description"])) - tool_tokens -= 2 - if "parameters" in function: - parameters = function["parameters"] - if "properties" in parameters: - assert isinstance(parameters["properties"], dict) - for propertiesKey in parameters["properties"]: # pyright: ignore - assert isinstance(propertiesKey, str) - tool_tokens += len(encoding.encode(propertiesKey)) - v = parameters["properties"][propertiesKey] # pyright: ignore - for field in v: # pyright: ignore - if field == "type": - tool_tokens += 2 - tool_tokens += len(encoding.encode(v["type"])) # pyright: ignore - elif field == "description": - tool_tokens += 2 - tool_tokens += len(encoding.encode(v["description"])) # pyright: ignore - elif field == "enum": - tool_tokens -= 3 - for o in v["enum"]: # pyright: ignore - tool_tokens += 3 - tool_tokens += len(encoding.encode(o)) # pyright: ignore - else: - trace_logger.warning(f"Not supported field {field}") - tool_tokens += 11 - if len(parameters["properties"]) == 0: # pyright: ignore - tool_tokens -= 2 - num_tokens += tool_tokens - num_tokens += 12 - return num_tokens - - def remaining_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: - token_limit = _model_info.get_token_limit(self._create_args["model"]) - return token_limit - self.count_tokens(messages, tools) - - @property - def capabilities(self) -> ModelCapabilities: - return self._model_capabilities - - -class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): - def __init__(self, **kwargs: Unpack[OpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") - - 
model_capabilities: Optional[ModelCapabilities] = None - copied_args = dict(kwargs).copy() - if "model_capabilities" in kwargs: - model_capabilities = kwargs["model_capabilities"] - del copied_args["model_capabilities"] - - client = _openai_client_from_config(copied_args) - create_args = _create_args_from_config(copied_args) - self._raw_config = copied_args - super().__init__(client, create_args, model_capabilities) - - def __getstate__(self) -> Dict[str, Any]: - state = self.__dict__.copy() - state["_client"] = None - return state - - def __setstate__(self, state: Dict[str, Any]) -> None: - self.__dict__.update(state) - self._client = _openai_client_from_config(state["_raw_config"]) - - -class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): - def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") - - model_capabilities: Optional[ModelCapabilities] = None - copied_args = dict(kwargs).copy() - if "model_capabilities" in kwargs: - model_capabilities = kwargs["model_capabilities"] - del copied_args["model_capabilities"] - - client = _azure_openai_client_from_config(copied_args) - create_args = _create_args_from_config(copied_args) - self._raw_config = copied_args - super().__init__(client, create_args, model_capabilities) - - def __getstate__(self) -> Dict[str, Any]: - state = self.__dict__.copy() - state["_client"] = None - return state - - def __setstate__(self, state: Dict[str, Any]) -> None: - self.__dict__.update(state) - self._client = _azure_openai_client_from_config(state["_raw_config"]) diff --git a/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py b/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py deleted file mode 100644 index d1edcf8c62f9..000000000000 --- a/python/packages/autogen-core/src/autogen_core/components/models/config/__init__.py +++ /dev/null @@ -1,52 +0,0 @@ -from typing import Awaitable, Callable, Dict, List, Literal, Optional, Union - -from typing_extensions import Required, TypedDict - -from .._model_client import ModelCapabilities - - -class ResponseFormat(TypedDict): - type: Literal["text", "json_object"] - - -class CreateArguments(TypedDict, total=False): - frequency_penalty: Optional[float] - logit_bias: Optional[Dict[str, int]] - max_tokens: Optional[int] - n: Optional[int] - presence_penalty: Optional[float] - response_format: ResponseFormat - seed: Optional[int] - stop: Union[Optional[str], List[str]] - temperature: Optional[float] - top_p: Optional[float] - user: str - - -AsyncAzureADTokenProvider = Callable[[], Union[str, Awaitable[str]]] - - -class BaseOpenAIClientConfiguration(CreateArguments, total=False): - model: str - api_key: str - timeout: Union[float, None] - max_retries: int - - -# See OpenAI docs for explanation of these parameters -class OpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): - organization: str - base_url: str - # Not required - model_capabilities: ModelCapabilities - - -class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): - # Azure specific - azure_endpoint: Required[str] - azure_deployment: str - api_version: Required[str] - azure_ad_token: str - azure_ad_token_provider: AsyncAzureADTokenProvider - # Must be provided - model_capabilities: Required[ModelCapabilities] diff --git a/python/packages/autogen-core/src/autogen_core/components/tools/_base.py 
b/python/packages/autogen-core/src/autogen_core/components/tools/_base.py index e9d8d2aa5b87..eb6a2067ebf5 100644 --- a/python/packages/autogen-core/src/autogen_core/components/tools/_base.py +++ b/python/packages/autogen-core/src/autogen_core/components/tools/_base.py @@ -1,7 +1,7 @@ import json from abc import ABC, abstractmethod from collections.abc import Sequence -from typing import Any, Dict, Generic, Mapping, Protocol, Type, TypedDict, TypeVar, runtime_checkable, cast +from typing import Any, Dict, Generic, Mapping, Protocol, Type, TypedDict, TypeVar, cast, runtime_checkable import jsonref from pydantic import BaseModel diff --git a/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py b/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py index 462374116d04..cc9145fab1e1 100644 --- a/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py +++ b/python/packages/autogen-core/src/autogen_core/components/tools/_function_tool.py @@ -41,6 +41,7 @@ class FunctionTool(BaseTool[BaseModel, BaseModel]): from autogen_core.base import CancellationToken from autogen_core.components.tools import FunctionTool from typing_extensions import Annotated + import asyncio async def get_stock_price(ticker: str, date: Annotated[str, "Date in YYYY/MM/DD"]) -> float: @@ -48,15 +49,19 @@ async def get_stock_price(ticker: str, date: Annotated[str, "Date in YYYY/MM/DD" return random.uniform(10, 200) - # Initialize a FunctionTool instance for retrieving stock prices. - stock_price_tool = FunctionTool(get_stock_price, description="Fetch the stock price for a given ticker.") + async def example(): + # Initialize a FunctionTool instance for retrieving stock prices. + stock_price_tool = FunctionTool(get_stock_price, description="Fetch the stock price for a given ticker.") - # Execute the tool with cancellation support. - cancellation_token = CancellationToken() - result = await stock_price_tool.run_json({"ticker": "AAPL", "date": "2021/01/01"}, cancellation_token) + # Execute the tool with cancellation support. + cancellation_token = CancellationToken() + result = await stock_price_tool.run_json({"ticker": "AAPL", "date": "2021/01/01"}, cancellation_token) - # Output the result as a formatted string. - print(stock_price_tool.return_value_as_string(result)) + # Output the result as a formatted string. + print(stock_price_tool.return_value_as_string(result)) + + + asyncio.run(example()) """ def __init__(self, func: Callable[..., Any], description: str, name: str | None = None) -> None: diff --git a/python/packages/autogen-core/tests/protos/serialization_test.proto b/python/packages/autogen-core/tests/protos/serialization_test.proto new file mode 100644 index 000000000000..611100ccde12 --- /dev/null +++ b/python/packages/autogen-core/tests/protos/serialization_test.proto @@ -0,0 +1,11 @@ +syntax = "proto3"; + +package agents; + +message ProtoMessage { + string message = 1; +} +message NestingProtoMessage { + string message = 1; + ProtoMessage nested = 2; +} \ No newline at end of file diff --git a/python/packages/autogen-core/tests/protos/serialization_test_pb2.py b/python/packages/autogen-core/tests/protos/serialization_test_pb2.py new file mode 100644 index 000000000000..ebc4bfee7018 --- /dev/null +++ b/python/packages/autogen-core/tests/protos/serialization_test_pb2.py @@ -0,0 +1,28 @@ +# -*- coding: utf-8 -*- +# Generated by the protocol buffer compiler. DO NOT EDIT! 
+# source: serialization_test.proto +# Protobuf Python Version: 4.25.1 +"""Generated protocol buffer code.""" +from google.protobuf import descriptor as _descriptor +from google.protobuf import descriptor_pool as _descriptor_pool +from google.protobuf import symbol_database as _symbol_database +from google.protobuf.internal import builder as _builder +# @@protoc_insertion_point(imports) + +_sym_db = _symbol_database.Default() + + + + +DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(b'\n\x18serialization_test.proto\x12\x06\x61gents\"\x1f\n\x0cProtoMessage\x12\x0f\n\x07message\x18\x01 \x01(\t\"L\n\x13NestingProtoMessage\x12\x0f\n\x07message\x18\x01 \x01(\t\x12$\n\x06nested\x18\x02 \x01(\x0b\x32\x14.agents.ProtoMessageb\x06proto3') + +_globals = globals() +_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals) +_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, 'serialization_test_pb2', _globals) +if _descriptor._USE_C_DESCRIPTORS == False: + DESCRIPTOR._options = None + _globals['_PROTOMESSAGE']._serialized_start=36 + _globals['_PROTOMESSAGE']._serialized_end=67 + _globals['_NESTINGPROTOMESSAGE']._serialized_start=69 + _globals['_NESTINGPROTOMESSAGE']._serialized_end=145 +# @@protoc_insertion_point(module_scope) diff --git a/python/packages/autogen-core/tests/protos/serialization_test_pb2.pyi b/python/packages/autogen-core/tests/protos/serialization_test_pb2.pyi new file mode 100644 index 000000000000..b8a284663f6e --- /dev/null +++ b/python/packages/autogen-core/tests/protos/serialization_test_pb2.pyi @@ -0,0 +1,46 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" + +import builtins +import google.protobuf.descriptor +import google.protobuf.message +import typing + +DESCRIPTOR: google.protobuf.descriptor.FileDescriptor + +@typing.final +class ProtoMessage(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + MESSAGE_FIELD_NUMBER: builtins.int + message: builtins.str + def __init__( + self, + *, + message: builtins.str = ..., + ) -> None: ... + def ClearField(self, field_name: typing.Literal["message", b"message"]) -> None: ... + +global___ProtoMessage = ProtoMessage + +@typing.final +class NestingProtoMessage(google.protobuf.message.Message): + DESCRIPTOR: google.protobuf.descriptor.Descriptor + + MESSAGE_FIELD_NUMBER: builtins.int + NESTED_FIELD_NUMBER: builtins.int + message: builtins.str + @property + def nested(self) -> global___ProtoMessage: ... + def __init__( + self, + *, + message: builtins.str = ..., + nested: global___ProtoMessage | None = ..., + ) -> None: ... + def HasField(self, field_name: typing.Literal["nested", b"nested"]) -> builtins.bool: ... + def ClearField(self, field_name: typing.Literal["message", b"message", "nested", b"nested"]) -> None: ... + +global___NestingProtoMessage = NestingProtoMessage diff --git a/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.py b/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.py new file mode 100644 index 000000000000..2daafffebfc8 --- /dev/null +++ b/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.py @@ -0,0 +1,4 @@ +# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! 
+"""Client and server classes corresponding to protobuf-defined services.""" +import grpc + diff --git a/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.pyi b/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.pyi new file mode 100644 index 000000000000..a6a9cff9dfd4 --- /dev/null +++ b/python/packages/autogen-core/tests/protos/serialization_test_pb2_grpc.pyi @@ -0,0 +1,17 @@ +""" +@generated by mypy-protobuf. Do not edit manually! +isort:skip_file +""" + +import abc +import collections.abc +import grpc +import grpc.aio +import typing + +_T = typing.TypeVar("_T") + +class _MaybeAsyncIterator(collections.abc.AsyncIterator[_T], collections.abc.Iterator[_T], metaclass=abc.ABCMeta): ... + +class _ServicerContext(grpc.ServicerContext, grpc.aio.ServicerContext): # type: ignore[misc, type-arg] + ... diff --git a/python/packages/autogen-core/tests/test_serialization.py b/python/packages/autogen-core/tests/test_serialization.py index 3f3b0174c8b0..6b5568411f6f 100644 --- a/python/packages/autogen-core/tests/test_serialization.py +++ b/python/packages/autogen-core/tests/test_serialization.py @@ -11,6 +11,7 @@ from autogen_core.base._serialization import DataclassJsonMessageSerializer, PydanticJsonMessageSerializer from autogen_core.components import Image from PIL import Image as PILImage +from protos.serialization_test_pb2 import NestingProtoMessage, ProtoMessage from pydantic import BaseModel @@ -83,6 +84,36 @@ def test_nesting_dataclass_dataclass() -> None: serde.add_serializer(try_get_known_serializers_for_type(NestingDataclassMessage)) +def test_proto() -> None: + serde = SerializationRegistry() + serde.add_serializer(try_get_known_serializers_for_type(ProtoMessage)) + + message = ProtoMessage(message="hello") + name = serde.type_name(message) + # TODO: should be PROTO_DATA_CONTENT_TYPE + data = serde.serialize(message, type_name=name, data_content_type=JSON_DATA_CONTENT_TYPE) + assert name == "ProtoMessage" + # TODO: assert data == stuff + deserialized = serde.deserialize(data, type_name=name, data_content_type=JSON_DATA_CONTENT_TYPE) + assert deserialized == message + + +def test_nested_proto() -> None: + serde = SerializationRegistry() + serde.add_serializer(try_get_known_serializers_for_type(NestingProtoMessage)) + + message = NestingProtoMessage(message="hello", nested=ProtoMessage(message="world")) + name = serde.type_name(message) + + # TODO: should be PROTO_DATA_CONTENT_TYPE + data = serde.serialize(message, type_name=name, data_content_type=JSON_DATA_CONTENT_TYPE) + + # TODO: assert data == stuff + + deserialized = serde.deserialize(data, type_name=name, data_content_type=JSON_DATA_CONTENT_TYPE) + assert deserialized == message + + @dataclass class DataclassNestedUnionSyntaxOldMessage: message: Union[str, int] diff --git a/python/packages/autogen-core/tests/test_tool_agent.py b/python/packages/autogen-core/tests/test_tool_agent.py index 322fdf6b7941..6184e9c78c83 100644 --- a/python/packages/autogen-core/tests/test_tool_agent.py +++ b/python/packages/autogen-core/tests/test_tool_agent.py @@ -1,6 +1,6 @@ import asyncio import json -from typing import Any, AsyncGenerator, List +from typing import Any, AsyncGenerator, List, Mapping, Optional, Sequence, Union import pytest from autogen_core.application import SingleThreadedAgentRuntime @@ -8,9 +8,13 @@ from autogen_core.components import FunctionCall from autogen_core.components.models import ( AssistantMessage, + ChatCompletionClient, + CreateResult, FunctionExecutionResult, 
FunctionExecutionResultMessage, - OpenAIChatCompletionClient, + LLMMessage, + ModelCapabilities, + RequestUsage, UserMessage, ) from autogen_core.components.tool_agent import ( @@ -20,13 +24,7 @@ ToolNotFoundException, tool_agent_caller_loop, ) -from autogen_core.components.tools import FunctionTool, Tool -from openai.resources.chat.completions import AsyncCompletions -from openai.types.chat.chat_completion import ChatCompletion, Choice -from openai.types.chat.chat_completion_chunk import ChatCompletionChunk -from openai.types.chat.chat_completion_message import ChatCompletionMessage -from openai.types.chat.chat_completion_message_tool_call import ChatCompletionMessageToolCall, Function -from openai.types.completion_usage import CompletionUsage +from autogen_core.components.tools import FunctionTool, Tool, ToolSchema def _pass_function(input: str) -> str: @@ -42,60 +40,6 @@ async def _async_sleep_function(input: str) -> str: return "pass" -class _MockChatCompletion: - def __init__(self, model: str = "gpt-4o") -> None: - self._saved_chat_completions: List[ChatCompletion] = [ - ChatCompletion( - id="id1", - choices=[ - Choice( - finish_reason="tool_calls", - index=0, - message=ChatCompletionMessage( - content=None, - tool_calls=[ - ChatCompletionMessageToolCall( - id="1", - type="function", - function=Function( - name="pass", - arguments=json.dumps({"input": "pass"}), - ), - ) - ], - role="assistant", - ), - ) - ], - created=0, - model=model, - object="chat.completion", - usage=CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0), - ), - ChatCompletion( - id="id2", - choices=[ - Choice( - finish_reason="stop", index=0, message=ChatCompletionMessage(content="Hello", role="assistant") - ) - ], - created=0, - model=model, - object="chat.completion", - usage=CompletionUsage(prompt_tokens=0, completion_tokens=0, total_tokens=0), - ), - ] - self._curr_index = 0 - - async def mock_create( - self, *args: Any, **kwargs: Any - ) -> ChatCompletion | AsyncGenerator[ChatCompletionChunk, None]: - await asyncio.sleep(0.1) - completion = self._saved_chat_completions[self._curr_index] - self._curr_index += 1 - return completion - - @pytest.mark.asyncio async def test_tool_agent() -> None: runtime = SingleThreadedAgentRuntime() @@ -144,10 +88,59 @@ async def test_tool_agent() -> None: @pytest.mark.asyncio -async def test_caller_loop(monkeypatch: pytest.MonkeyPatch) -> None: - mock = _MockChatCompletion(model="gpt-4o-2024-05-13") - monkeypatch.setattr(AsyncCompletions, "create", mock.mock_create) - client = OpenAIChatCompletionClient(model="gpt-4o-2024-05-13", api_key="api_key") +async def test_caller_loop() -> None: + class MockChatCompletionClient(ChatCompletionClient): + async def create( + self, + messages: Sequence[LLMMessage], + tools: Sequence[Tool | ToolSchema] = [], + json_output: Optional[bool] = None, + extra_create_args: Mapping[str, Any] = {}, + cancellation_token: Optional[CancellationToken] = None, + ) -> CreateResult: + if len(messages) == 1: + return CreateResult( + content=[FunctionCall(id="1", name="pass", arguments=json.dumps({"input": "test"}))], + finish_reason="stop", + usage=RequestUsage(prompt_tokens=0, completion_tokens=0), + cached=False, + logprobs=None, + ) + return CreateResult( + content="Done", + finish_reason="stop", + usage=RequestUsage(prompt_tokens=0, completion_tokens=0), + cached=False, + logprobs=None, + ) + + def create_stream( + self, + messages: Sequence[LLMMessage], + tools: Sequence[Tool | ToolSchema] = [], + json_output: Optional[bool] = None, + 
extra_create_args: Mapping[str, Any] = {}, + cancellation_token: Optional[CancellationToken] = None, + ) -> AsyncGenerator[Union[str, CreateResult], None]: + raise NotImplementedError() + + def actual_usage(self) -> RequestUsage: + return RequestUsage(prompt_tokens=0, completion_tokens=0) + + def total_usage(self) -> RequestUsage: + return RequestUsage(prompt_tokens=0, completion_tokens=0) + + def count_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: + return 0 + + def remaining_tokens(self, messages: Sequence[LLMMessage], tools: Sequence[Tool | ToolSchema] = []) -> int: + return 0 + + @property + def capabilities(self) -> ModelCapabilities: + return ModelCapabilities(vision=False, function_calling=True, json_output=False) + + client = MockChatCompletionClient() tools: List[Tool] = [FunctionTool(_pass_function, name="pass", description="Pass function")] runtime = SingleThreadedAgentRuntime() await runtime.register( diff --git a/python/packages/autogen-core/tests/test_tools.py b/python/packages/autogen-core/tests/test_tools.py index 867fd012d577..e7995969802f 100644 --- a/python/packages/autogen-core/tests/test_tools.py +++ b/python/packages/autogen-core/tests/test_tools.py @@ -1,12 +1,11 @@ import inspect from typing import Annotated, List -from autogen_core.components.tools._base import ToolSchema import pytest from autogen_core.base import CancellationToken from autogen_core.components._function_utils import get_typed_signature -from autogen_core.components.models._openai_client import convert_tools from autogen_core.components.tools import BaseTool, FunctionTool +from autogen_core.components.tools._base import ToolSchema from pydantic import BaseModel, Field, model_serializer from pydantic_core import PydanticUndefined @@ -142,7 +141,7 @@ def my_function() -> str: sig = get_typed_signature(my_function) assert isinstance(sig, inspect.Signature) assert len(sig.parameters) == 0 - assert sig.return_annotation == str + assert sig.return_annotation is str def test_get_typed_signature_annotated() -> None: @@ -162,7 +161,7 @@ def my_function() -> "str": sig = get_typed_signature(my_function) assert isinstance(sig, inspect.Signature) assert len(sig.parameters) == 0 - assert sig.return_annotation == str + assert sig.return_annotation is str def test_func_tool() -> None: @@ -187,11 +186,11 @@ def my_function(my_arg: Annotated[str, "test description"]) -> str: assert issubclass(tool.args_type(), BaseModel) assert issubclass(tool.return_type(), str) assert tool.args_type().model_fields["my_arg"].description == "test description" - assert tool.args_type().model_fields["my_arg"].annotation == str + assert tool.args_type().model_fields["my_arg"].annotation is str assert tool.args_type().model_fields["my_arg"].is_required() is True assert tool.args_type().model_fields["my_arg"].default is PydanticUndefined assert len(tool.args_type().model_fields) == 1 - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None @@ -203,7 +202,7 @@ def my_function() -> Annotated[str, "test description"]: assert tool.name == "my_function" assert tool.description == "Function tool." assert issubclass(tool.args_type(), BaseModel) - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None @@ -216,7 +215,7 @@ def my_function() -> str: assert tool.description == "Function tool." 
assert issubclass(tool.args_type(), BaseModel) assert len(tool.args_type().model_fields) == 0 - assert tool.return_type() == str + assert tool.return_type() is str assert tool.state_type() is None @@ -323,29 +322,6 @@ def my_function(arg: int) -> int: assert tool.return_value_as_string(result) == "5" -def test_convert_tools_accepts_both_func_tool_and_schema() -> None: - def my_function(arg: str, other: Annotated[int, "int arg"], nonrequired: int = 5) -> MyResult: - return MyResult(result="test") - - tool = FunctionTool(my_function, description="Function tool.") - schema = tool.schema - - converted_tool_schema = convert_tools([tool, schema]) - - assert len(converted_tool_schema) == 2 - assert converted_tool_schema[0] == converted_tool_schema[1] - - -def test_convert_tools_accepts_both_tool_and_schema() -> None: - tool = MyTool() - schema = tool.schema - - converted_tool_schema = convert_tools([tool, schema]) - - assert len(converted_tool_schema) == 2 - assert converted_tool_schema[0] == converted_tool_schema[1] - - @pytest.mark.asyncio async def test_func_tool_return_list() -> None: def my_function() -> List[int]: diff --git a/python/packages/autogen-ext/pyproject.toml b/python/packages/autogen-ext/pyproject.toml index f9d741842d7f..3d263df51a50 100644 --- a/python/packages/autogen-ext/pyproject.toml +++ b/python/packages/autogen-ext/pyproject.toml @@ -4,7 +4,7 @@ build-backend = "hatchling.build" [project] name = "autogen-ext" -version = "0.4.0.dev6" +version = "0.4.0.dev7" license = {file = "LICENSE-CODE"} description = "AutoGen extensions library" readme = "README.md" @@ -15,7 +15,7 @@ classifiers = [ "Operating System :: OS Independent", ] dependencies = [ - "autogen-core==0.4.0.dev6", + "autogen-core==0.4.0.dev7", ] diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/__init__.py b/python/packages/autogen-ext/src/autogen_ext/agents/__init__.py index d89a890ab2bf..d595e0180e6c 100644 --- a/python/packages/autogen-ext/src/autogen_ext/agents/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/agents/__init__.py @@ -1,3 +1,4 @@ +from ._openai_assistant_agent import OpenAIAssistantAgent from .web_surfer._multimodal_web_surfer import MultimodalWebSurfer -__all__ = ["MultimodalWebSurfer"] +__all__ = ["MultimodalWebSurfer", "OpenAIAssistantAgent"] diff --git a/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py b/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py new file mode 100644 index 000000000000..7e1124728fbf --- /dev/null +++ b/python/packages/autogen-ext/src/autogen_ext/agents/_openai_assistant_agent.py @@ -0,0 +1,562 @@ +import asyncio +import json +import logging +import os +from typing import ( + Any, + AsyncGenerator, + Awaitable, + Callable, + Dict, + Iterable, + List, + Literal, + Optional, + Sequence, + Union, + cast, +) + +import aiofiles +from autogen_agentchat import EVENT_LOGGER_NAME +from autogen_agentchat.agents import BaseChatAgent +from autogen_agentchat.base import Response +from autogen_agentchat.messages import ( + AgentMessage, + ChatMessage, + HandoffMessage, + MultiModalMessage, + StopMessage, + TextMessage, + ToolCallMessage, + ToolCallResultMessage, +) +from autogen_core.base import CancellationToken +from autogen_core.components import FunctionCall +from autogen_core.components.models._types import FunctionExecutionResult +from autogen_core.components.tools import FunctionTool, Tool +from openai import NOT_GIVEN, AsyncClient, NotGiven +from openai.pagination import AsyncCursorPage +from 
openai.resources.beta.threads import AsyncMessages, AsyncRuns, AsyncThreads +from openai.types import FileObject +from openai.types.beta import thread_update_params +from openai.types.beta.assistant import Assistant +from openai.types.beta.assistant_response_format_option_param import AssistantResponseFormatOptionParam +from openai.types.beta.assistant_tool_param import AssistantToolParam +from openai.types.beta.code_interpreter_tool_param import CodeInterpreterToolParam +from openai.types.beta.file_search_tool_param import FileSearchToolParam +from openai.types.beta.function_tool_param import FunctionToolParam +from openai.types.beta.thread import Thread, ToolResources, ToolResourcesCodeInterpreter +from openai.types.beta.threads import Message, MessageDeleted, Run +from openai.types.beta.vector_store import VectorStore +from openai.types.shared_params.function_definition import FunctionDefinition + +event_logger = logging.getLogger(EVENT_LOGGER_NAME) + + +def _convert_tool_to_function_param(tool: Tool) -> FunctionToolParam: + """Convert an autogen Tool to an OpenAI Assistant function tool parameter.""" + schema = tool.schema + parameters: Dict[str, object] = {} + if "parameters" in schema: + parameters = { + "type": schema["parameters"]["type"], + "properties": schema["parameters"]["properties"], + } + if "required" in schema["parameters"]: + parameters["required"] = schema["parameters"]["required"] + + function_def = FunctionDefinition( + name=schema["name"], + description=schema.get("description", ""), + parameters=parameters, + ) + return FunctionToolParam(type="function", function=function_def) + + +class OpenAIAssistantAgent(BaseChatAgent): + """An agent implementation that uses the OpenAI Assistant API to generate responses. + + This agent leverages the OpenAI Assistant API to create AI assistants with capabilities like: + - Code interpretation and execution + - File handling and search + - Custom function calling + - Multi-turn conversations + + The agent maintains a thread of conversation and can use various tools including: + - Code interpreter: For executing code and working with files + - File search: For searching through uploaded documents + - Custom functions: For extending capabilities with user-defined tools + + .. note:: + + The agent deletes all messages in the thread when :meth:`on_reset` is called. + + Key Features: + - Supports multiple file formats including code, documents, images + - Can handle up to 128 tools per assistant + - Maintains conversation context in threads + - Supports file uploads for code interpreter and search + - Vector store integration for efficient file search + - Automatic file parsing and embedding + + Example: + .. 
code-block:: python + + from openai import AsyncClient + from autogen_core.base import CancellationToken + import asyncio + from autogen_ext.agents import OpenAIAssistantAgent + from autogen_agentchat.messages import TextMessage + + + async def example(): + cancellation_token = CancellationToken() + + # Create an OpenAI client + client = AsyncClient(api_key="your-api-key", base_url="your-base-url") + + # Create an assistant with code interpreter + assistant = OpenAIAssistantAgent( + name="Python Helper", + description="Helps with Python programming", + client=client, + model="gpt-4", + instructions="You are a helpful Python programming assistant.", + tools=["code_interpreter"], + ) + + # Upload files for the assistant to use + await assistant.on_upload_for_code_interpreter("data.csv", cancellation_token) + + # Get response from the assistant + _response = await assistant.on_messages( + [TextMessage(source="user", content="Analyze the data in data.csv")], cancellation_token + ) + + # Clean up resources + await assistant.delete_uploaded_files(cancellation_token) + await assistant.delete_assistant(cancellation_token) + + + asyncio.run(example()) + + Args: + name (str): Name of the assistant + description (str): Description of the assistant's purpose + client (AsyncClient): OpenAI API client instance + model (str): Model to use (e.g. "gpt-4") + instructions (str): System instructions for the assistant + tools (Optional[Iterable[Union[Literal["code_interpreter", "file_search"], Tool | Callable[..., Any] | Callable[..., Awaitable[Any]]]]]): Tools the assistant can use + assistant_id (Optional[str]): ID of existing assistant to use + metadata (Optional[object]): Additional metadata for the assistant + response_format (Optional[AssistantResponseFormatOptionParam]): Response format settings + temperature (Optional[float]): Temperature for response generation + tool_resources (Optional[ToolResources]): Additional tool configuration + top_p (Optional[float]): Top p sampling parameter + """ + + def __init__( + self, + name: str, + description: str, + client: AsyncClient, + model: str, + instructions: str, + tools: Optional[ + Iterable[ + Union[ + Literal["code_interpreter", "file_search"], + Tool | Callable[..., Any] | Callable[..., Awaitable[Any]], + ] + ] + ] = None, + assistant_id: Optional[str] = None, + thread_id: Optional[str] = None, + metadata: Optional[object] = None, + response_format: Optional[AssistantResponseFormatOptionParam] = None, + temperature: Optional[float] = None, + tool_resources: Optional[ToolResources] = None, + top_p: Optional[float] = None, + ) -> None: + super().__init__(name, description) + if tools is None: + tools = [] + + # Store original tools and converted tools separately + self._original_tools: List[Tool] = [] + converted_tools: List[AssistantToolParam] = [] + for tool in tools: + if isinstance(tool, str): + if tool == "code_interpreter": + converted_tools.append(CodeInterpreterToolParam(type="code_interpreter")) + elif tool == "file_search": + converted_tools.append(FileSearchToolParam(type="file_search")) + elif isinstance(tool, Tool): + self._original_tools.append(tool) + converted_tools.append(_convert_tool_to_function_param(tool)) + elif callable(tool): + if hasattr(tool, "__doc__") and tool.__doc__ is not None: + description = tool.__doc__ + else: + description = "" + function_tool = FunctionTool(tool, description=description) + self._original_tools.append(function_tool) + converted_tools.append(_convert_tool_to_function_param(function_tool)) + else: + raise 
ValueError(f"Unsupported tool type: {type(tool)}") + + self._client = client + self._assistant: Optional[Assistant] = None + self._thread: Optional[Thread] = None + self._init_thread_id = thread_id + self._model = model + self._instructions = instructions + self._api_tools = converted_tools + self._assistant_id = assistant_id + self._metadata = metadata + self._response_format = response_format + self._temperature = temperature + self._tool_resources = tool_resources + self._top_p = top_p + self._vector_store_id: Optional[str] = None + self._uploaded_file_ids: List[str] = [] + + async def _ensure_initialized(self) -> None: + """Ensure assistant and thread are created.""" + if self._assistant is None: + if self._assistant_id: + self._assistant = await self._client.beta.assistants.retrieve(assistant_id=self._assistant_id) + else: + self._assistant = await self._client.beta.assistants.create( + model=self._model, + description=self.description, + instructions=self._instructions, + tools=self._api_tools, + metadata=self._metadata, + response_format=self._response_format if self._response_format else NOT_GIVEN, # type: ignore + temperature=self._temperature, + tool_resources=self._tool_resources if self._tool_resources else NOT_GIVEN, # type: ignore + top_p=self._top_p, + ) + + if self._thread is None: + if self._init_thread_id: + self._thread = await self._client.beta.threads.retrieve(thread_id=self._init_thread_id) + else: + self._thread = await self._client.beta.threads.create() + + @property + def produced_message_types(self) -> List[type[ChatMessage]]: + """The types of messages that the assistant agent produces.""" + return [TextMessage] + + @property + def threads(self) -> AsyncThreads: + return self._client.beta.threads + + @property + def runs(self) -> AsyncRuns: + return self._client.beta.threads.runs + + @property + def messages(self) -> AsyncMessages: + return self._client.beta.threads.messages + + @property + def _get_assistant_id(self) -> str: + if self._assistant is None: + raise ValueError("Assistant not initialized") + return self._assistant.id + + @property + def _thread_id(self) -> str: + if self._thread is None: + raise ValueError("Thread not initialized") + return self._thread.id + + async def _execute_tool_call(self, tool_call: FunctionCall, cancellation_token: CancellationToken) -> str: + """Execute a tool call and return the result.""" + try: + if not self._original_tools: + raise ValueError("No tools are available.") + tool = next((t for t in self._original_tools if t.name == tool_call.name), None) + if tool is None: + raise ValueError(f"The tool '{tool_call.name}' is not available.") + arguments = json.loads(tool_call.arguments) + result = await tool.run_json(arguments, cancellation_token) + return tool.return_value_as_string(result) + except Exception as e: + return f"Error: {e}" + + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + """Handle incoming messages and return a response.""" + async for message in self.on_messages_stream(messages, cancellation_token): + if isinstance(message, Response): + return message + raise AssertionError("The stream should have returned the final result.") + + async def on_messages_stream( + self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken + ) -> AsyncGenerator[AgentMessage | Response, None]: + """Handle incoming messages and return a response.""" + await self._ensure_initialized() + + # Process all messages in sequence + for message in 
messages: + if isinstance(message, (TextMessage, MultiModalMessage)): + await self.handle_text_message(str(message.content), cancellation_token) + elif isinstance(message, (StopMessage, HandoffMessage)): + await self.handle_text_message(message.content, cancellation_token) + + # Inner messages for tool calls + inner_messages: List[AgentMessage] = [] + + # Create and start a run + run: Run = await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.runs.create( + thread_id=self._thread_id, + assistant_id=self._get_assistant_id, + ) + ) + ) + + # Wait for run completion by polling + while True: + run = await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.runs.retrieve( + thread_id=self._thread_id, + run_id=run.id, + ) + ) + ) + + if run.status == "failed": + raise ValueError(f"Run failed: {run.last_error}") + + # If the run requires action (function calls), execute tools and continue + if run.status == "requires_action" and run.required_action is not None: + tool_calls: List[FunctionCall] = [] + for required_tool_call in run.required_action.submit_tool_outputs.tool_calls: + if required_tool_call.type == "function": + tool_calls.append( + FunctionCall( + id=required_tool_call.id, + name=required_tool_call.function.name, + arguments=required_tool_call.function.arguments, + ) + ) + + # Add tool call message to inner messages + tool_call_msg = ToolCallMessage(source=self.name, content=tool_calls) + inner_messages.append(tool_call_msg) + event_logger.debug(tool_call_msg) + yield tool_call_msg + + # Execute tool calls and get results + tool_outputs: List[FunctionExecutionResult] = [] + for tool_call in tool_calls: + result = await self._execute_tool_call(tool_call, cancellation_token) + tool_outputs.append(FunctionExecutionResult(content=result, call_id=tool_call.id)) + + # Add tool result message to inner messages + tool_result_msg = ToolCallResultMessage(source=self.name, content=tool_outputs) + inner_messages.append(tool_result_msg) + event_logger.debug(tool_result_msg) + yield tool_result_msg + + # Submit tool outputs back to the run + run = await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.runs.submit_tool_outputs( + thread_id=self._thread_id, + run_id=run.id, + tool_outputs=[{"tool_call_id": t.call_id, "output": t.content} for t in tool_outputs], + ) + ) + ) + continue + + if run.status == "completed": + break + + await asyncio.sleep(0.5) + + # Get messages after run completion + assistant_messages: AsyncCursorPage[Message] = await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.messages.list(thread_id=self._thread_id, order="desc", limit=1) + ) + ) + + if not assistant_messages.data: + raise ValueError("No messages received from assistant") + + # Get the last message's content + last_message = assistant_messages.data[0] + if not last_message.content: + raise ValueError(f"No content in the last message: {last_message}") + + # Extract text content + text_content = [content for content in last_message.content if content.type == "text"] + if not text_content: + raise ValueError(f"Expected text content in the last message: {last_message.content}") + + # Return the assistant's response as a Response with inner messages + chat_message = TextMessage(source=self.name, content=text_content[0].text.value) + yield Response(chat_message=chat_message, inner_messages=inner_messages) + + async def handle_text_message(self, content: str, cancellation_token: 
CancellationToken) -> None: + """Handle regular text messages by adding them to the thread.""" + await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.messages.create( + thread_id=self._thread_id, + content=content, + role="user", + ) + ) + ) + + async def on_reset(self, cancellation_token: CancellationToken) -> None: + """Handle reset command by deleting all messages in the thread.""" + # Retrieve all message IDs in the thread + all_msgs: List[str] = [] + after: str | NotGiven = NOT_GIVEN + while True: + msgs: AsyncCursorPage[Message] = await cancellation_token.link_future( + asyncio.ensure_future(self._client.beta.threads.messages.list(self._thread_id, after=after)) + ) + for msg in msgs.data: + all_msgs.append(msg.id) + after = msg.id + if not msgs.has_next_page(): + break + + # Delete all messages + for msg_id in all_msgs: + status: MessageDeleted = await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.messages.delete(message_id=msg_id, thread_id=self._thread_id) + ) + ) + assert status.deleted is True + + async def _upload_files(self, file_paths: str | Iterable[str], cancellation_token: CancellationToken) -> List[str]: + """Upload files and return their IDs.""" + if isinstance(file_paths, str): + file_paths = [file_paths] + + file_ids: List[str] = [] + for file_path in file_paths: + async with aiofiles.open(file_path, mode="rb") as f: + file_content = await cancellation_token.link_future(asyncio.ensure_future(f.read())) + file_name = os.path.basename(file_path) + + file: FileObject = await cancellation_token.link_future( + asyncio.ensure_future(self._client.files.create(file=(file_name, file_content), purpose="assistants")) + ) + file_ids.append(file.id) + self._uploaded_file_ids.append(file.id) + + return file_ids + + async def on_upload_for_code_interpreter( + self, file_paths: str | Iterable[str], cancellation_token: CancellationToken + ) -> None: + """Handle file uploads for the code interpreter.""" + file_ids = await self._upload_files(file_paths, cancellation_token) + + # Update thread with the new files + thread = await cancellation_token.link_future( + asyncio.ensure_future(self._client.beta.threads.retrieve(thread_id=self._thread_id)) + ) + tool_resources: ToolResources = thread.tool_resources or ToolResources() + code_interpreter: ToolResourcesCodeInterpreter = ( + tool_resources.code_interpreter or ToolResourcesCodeInterpreter() + ) + existing_file_ids: List[str] = code_interpreter.file_ids or [] + existing_file_ids.extend(file_ids) + tool_resources.code_interpreter = ToolResourcesCodeInterpreter(file_ids=existing_file_ids) + + await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.threads.update( + thread_id=self._thread_id, + tool_resources=cast(thread_update_params.ToolResources, tool_resources.model_dump()), + ) + ) + ) + + async def on_upload_for_file_search( + self, file_paths: str | Iterable[str], cancellation_token: CancellationToken + ) -> None: + """Handle file uploads for file search.""" + await self._ensure_initialized() + + # Check if file_search is enabled in tools + if not any(tool.get("type") == "file_search" for tool in self._api_tools): + raise ValueError( + "File search is not enabled for this assistant. Add a file_search tool when creating the assistant." 
+ ) + + # Create vector store if not already created + if self._vector_store_id is None: + vector_store: VectorStore = await cancellation_token.link_future( + asyncio.ensure_future(self._client.beta.vector_stores.create()) + ) + self._vector_store_id = vector_store.id + + # Update assistant with vector store ID + await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.assistants.update( + assistant_id=self._get_assistant_id, + tool_resources={"file_search": {"vector_store_ids": [self._vector_store_id]}}, + ) + ) + ) + + file_ids = await self._upload_files(file_paths, cancellation_token) + + # Create file batch with the file IDs + await cancellation_token.link_future( + asyncio.ensure_future( + self._client.beta.vector_stores.file_batches.create_and_poll( + vector_store_id=self._vector_store_id, file_ids=file_ids + ) + ) + ) + + async def delete_uploaded_files(self, cancellation_token: CancellationToken) -> None: + """Delete all files that were uploaded by this agent instance.""" + for file_id in self._uploaded_file_ids: + try: + await cancellation_token.link_future(asyncio.ensure_future(self._client.files.delete(file_id=file_id))) + except Exception as e: + event_logger.error(f"Failed to delete file {file_id}: {str(e)}") + self._uploaded_file_ids = [] + + async def delete_assistant(self, cancellation_token: CancellationToken) -> None: + """Delete the assistant if it was created by this instance.""" + if self._assistant is not None and not self._assistant_id: + try: + await cancellation_token.link_future( + asyncio.ensure_future(self._client.beta.assistants.delete(assistant_id=self._get_assistant_id)) + ) + self._assistant = None + except Exception as e: + event_logger.error(f"Failed to delete assistant: {str(e)}") + + async def delete_vector_store(self, cancellation_token: CancellationToken) -> None: + """Delete the vector store if it was created by this instance.""" + if self._vector_store_id is not None: + try: + await cancellation_token.link_future( + asyncio.ensure_future(self._client.beta.vector_stores.delete(vector_store_id=self._vector_store_id)) + ) + self._vector_store_id = None + except Exception as e: + event_logger.error(f"Failed to delete vector store: {str(e)}") diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py b/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py deleted file mode 100644 index 009997c41abc..000000000000 --- a/python/packages/autogen-ext/src/autogen_ext/code_executor/aca_dynamic_sessions/__init__.py +++ /dev/null @@ -1,21 +0,0 @@ -import warnings -from typing import Any - -from ...code_executors import ACADynamicSessionsCodeExecutor - - -class AzureContainerCodeExecutor(ACADynamicSessionsCodeExecutor): - """AzureContainerCodeExecutor has been renamed and moved to autogen_ext.code_executors.ACADynamicSessionsCodeExecutor""" - - def __init__(self, *args: Any, **kwargs: Any) -> None: - warnings.warn( - "AzureContainerCodeExecutor has been renamed and moved to autogen_ext.code_executors.ACADynamicSessionsCodeExecutor", - DeprecationWarning, - stacklevel=2, - ) - super().__init__(*args, **kwargs) - - -__all__ = [ - "AzureContainerCodeExecutor", -] diff --git a/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py b/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py deleted file mode 100644 index 66719114300d..000000000000 --- 
a/python/packages/autogen-ext/src/autogen_ext/code_executor/docker_executor/__init__.py +++ /dev/null @@ -1,11 +0,0 @@ -import warnings - -from ...code_executors import DockerCommandLineCodeExecutor - -warnings.warn( - "DockerCommandLineCodeExecutor moved to autogen_ext.code_executors.DockerCommandLineCodeExecutor", - DeprecationWarning, - stacklevel=2, -) - -__all__ = ["DockerCommandLineCodeExecutor"] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/__init__.py index d39c1d9bf247..80533f80575e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/__init__.py @@ -2,6 +2,13 @@ AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient, ) +from ._openai.config import AzureOpenAIClientConfiguration, OpenAIClientConfiguration from ._reply_chat_completion_client import ReplayChatCompletionClient -__all__ = ["AzureOpenAIChatCompletionClient", "OpenAIChatCompletionClient", "ReplayChatCompletionClient"] +__all__ = [ + "AzureOpenAIClientConfiguration", + "AzureOpenAIChatCompletionClient", + "OpenAIClientConfiguration", + "OpenAIChatCompletionClient", + "ReplayChatCompletionClient", +] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py index aa9f772d0b95..b00ba32da559 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/_openai_client.py @@ -86,10 +86,29 @@ def _azure_openai_client_from_config(config: Mapping[str, Any]) -> AsyncAzureOpe # Take a copy copied_config = dict(config).copy() + import warnings + + if "azure_deployment" not in copied_config and "model" in copied_config: + warnings.warn( + "Previous behavior of using the model name as the deployment name is deprecated and will be removed in 0.4. Please specify azure_deployment", + stacklevel=2, + ) + + if "azure_endpoint" not in copied_config and "base_url" in copied_config: + warnings.warn( + "Previous behavior of using the base_url as the endpoint is deprecated and will be removed in 0.4. Please specify azure_endpoint", + stacklevel=2, + ) + # Do some fixups copied_config["azure_deployment"] = copied_config.get("azure_deployment", config.get("model")) if copied_config["azure_deployment"] is not None: - copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") + if "." in copied_config["azure_deployment"]: + warnings.warn( + "Previous behavior stripping '.' 
from the deployment name is deprecated and will be removed in 0.4", + stacklevel=2, + ) + copied_config["azure_deployment"] = copied_config["azure_deployment"].replace(".", "") copied_config["azure_endpoint"] = copied_config.get("azure_endpoint", copied_config.pop("base_url", None)) # Shave down the config to just the AzureOpenAIChatCompletionClient kwargs @@ -331,9 +350,7 @@ def __init__( model_capabilities: Optional[ModelCapabilities] = None, ): self._client = client - if model_capabilities is None and isinstance(client, AsyncAzureOpenAI): - raise ValueError("AzureOpenAIChatCompletionClient requires explicit model capabilities") - elif model_capabilities is None: + if model_capabilities is None: self._model_capabilities = _model_info.get_capabilities(create_args["model"]) else: self._model_capabilities = model_capabilities @@ -556,6 +573,8 @@ async def create_stream( json_output: Optional[bool] = None, extra_create_args: Mapping[str, Any] = {}, cancellation_token: Optional[CancellationToken] = None, + *, + max_consecutive_empty_chunk_tolerance: int = 0, ) -> AsyncGenerator[Union[str, CreateResult], None]: """ Creates an AsyncGenerator that will yield a stream of chat completions based on the provided messages and tools. @@ -566,6 +585,7 @@ async def create_stream( json_output (Optional[bool], optional): If True, the output will be in JSON format. Defaults to None. extra_create_args (Mapping[str, Any], optional): Additional arguments for the creation process. Defaults to `{}`. cancellation_token (Optional[CancellationToken], optional): A token to cancel the operation. Defaults to None. + max_consecutive_empty_chunk_tolerance (int): The maximum number of consecutive empty chunks to tolerate before raising a ValueError. This only seems to be needed when using `AzureOpenAIChatCompletionClient`. Defaults to 0. Yields: AsyncGenerator[Union[str, CreateResult], None]: A generator yielding the completion results as they are produced. @@ -636,6 +656,8 @@ async def create_stream( full_tool_calls: Dict[int, FunctionCall] = {} completion_tokens = 0 logprobs: Optional[List[ChatCompletionTokenLogprob]] = None + empty_chunk_count = 0 + while True: try: chunk_future = asyncio.ensure_future(anext(stream)) @@ -643,6 +665,20 @@ async def create_stream( cancellation_token.link_future(chunk_future) chunk = await chunk_future + # This is to address a bug in AzureOpenAIChatCompletionClient. OpenAIChatCompletionClient works fine. + # https://github.com/microsoft/autogen/issues/4213 + if len(chunk.choices) == 0: + empty_chunk_count += 1 + if max_consecutive_empty_chunk_tolerance == 0: + raise ValueError( + "Consecutive empty chunks found. Change max_consecutive_empty_chunk_tolerance to increase empty chunk tolerance" + ) + elif empty_chunk_count >= max_consecutive_empty_chunk_tolerance: + raise ValueError("Exceeded the threshold of receiving consecutive empty chunks") + continue + else: + empty_chunk_count = 0 + # to process usage chunk in streaming situations + # add stream_options={"include_usage": True} in the initialization of OpenAIChatCompletionClient(...) + # However the different api's @@ -851,6 +887,64 @@ def capabilities(self) -> ModelCapabilities: class OpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): + """Chat completion client for OpenAI hosted models. + + You can also use this client for OpenAI-compatible ChatCompletion endpoints. 
+ **Using this client for non-OpenAI models is not tested or guaranteed.** + + For non-OpenAI models, please first take a look at our `community extensions `_ + for additional model clients. + + Args: + model (str): The model to use. **Required.** + api_key (str): The API key to use. **Required if 'OPENAI_API_KEY' is not found in the environment variables.** + timeout (optional, int): The timeout for the request in seconds. + max_retries (optional, int): The maximum number of retries to attempt. + organization_id (optional, str): The organization ID to use. + base_url (optional, str): The base URL to use. **Required if the model is not hosted on OpenAI.** + model_capabilities (optional, ModelCapabilities): The capabilities of the model. **Required if the model name is not a valid OpenAI model.** + + To use this client, you must install the `openai` extension: + + .. code-block:: bash + + pip install 'autogen-ext[openai]==0.4.0.dev7' + + The following code snippet shows how to use the client with an OpenAI model: + + .. code-block:: python + + from autogen_ext.models import OpenAIChatCompletionClient + from autogen_core.components.models import UserMessage + + openai_client = OpenAIChatCompletionClient( + model="gpt-4o-2024-08-06", + # api_key="sk-...", # Optional if you have an OPENAI_API_KEY environment variable set. + ) + + result = await openai_client.create([UserMessage(content="What is the capital of France?", source="user")]) # type: ignore + print(result) + + + To use the client with a non-OpenAI model, you need to provide the base URL of the model and the model capabilities: + + .. code-block:: python + + from autogen_ext.models import OpenAIChatCompletionClient + + custom_model_client = OpenAIChatCompletionClient( + model="custom-model-name", + base_url="https://custom-model.com/rest/of/the/path", + api_key="placeholder", + model_capabilities={ + "vision": True, + "function_calling": True, + "json_output": True, + }, + ) + + """ + def __init__(self, **kwargs: Unpack[OpenAIClientConfiguration]): if "model" not in kwargs: raise ValueError("model is required for OpenAIChatCompletionClient") @@ -877,10 +971,55 @@ def __setstate__(self, state: Dict[str, Any]) -> None: class AzureOpenAIChatCompletionClient(BaseOpenAIChatCompletionClient): - def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): - if "model" not in kwargs: - raise ValueError("model is required for OpenAIChatCompletionClient") + """Chat completion client for Azure OpenAI hosted models. + + Args: + azure_endpoint (str): The endpoint for the Azure model. **Required for Azure models.** + model (str): The deployment ID for the Azure model. **Required for Azure models.** + api_version (str): The API version to use. **Required for Azure models.** + azure_ad_token (str): The Azure AD token to use. Provide this or `azure_ad_token_provider` for token-based authentication. + azure_ad_token_provider (Callable[[], Awaitable[str]]): The Azure AD token provider to use. Provide this or `azure_ad_token` for token-based authentication. + model_capabilities (ModelCapabilities): The capabilities of the model if default resolved values are not correct. + api_key (optional, str): The API key to use. Use this if you are using key-based authentication. It is optional if you are using Azure AD token based authentication or the `AZURE_OPENAI_API_KEY` environment variable. + timeout (optional, int): The timeout for the request in seconds. + max_retries (optional, int): The maximum number of retries to attempt. 
+ + To use this client, you must install the `azure` and `openai` extensions: + + .. code-block:: bash + + pip install 'autogen-ext[openai,azure]==0.4.0.dev7' + + To use the client, you need to provide your deployment ID, Azure Cognitive Services endpoint, + API version, and model capabilities. + For authentication, you can either provide an API key or an Azure Active Directory (AAD) token credential. + + The following code snippet shows how to use AAD authentication. + The identity used must be assigned the `Cognitive Services OpenAI User `_ role. + + .. code-block:: python + + from autogen_ext.models import AzureOpenAIChatCompletionClient + from azure.identity import DefaultAzureCredential, get_bearer_token_provider + + # Create the token provider + token_provider = get_bearer_token_provider(DefaultAzureCredential(), "https://cognitiveservices.azure.com/.default") + + az_model_client = AzureOpenAIChatCompletionClient( + azure_deployment="{your-azure-deployment}", + model="{deployed-model, such as 'gpt-4o'}", + api_version="2024-06-01", + azure_endpoint="https://{your-custom-endpoint}.openai.azure.com/", + azure_ad_token_provider=token_provider, # Optional if you choose key-based authentication. + # api_key="sk-...", # For key-based authentication. `AZURE_OPENAI_API_KEY` environment variable can also be used instead. + ) + + See `here `_ for how to use the Azure client directly or for more info. + + """ + + def __init__(self, **kwargs: Unpack[AzureOpenAIClientConfiguration]): model_capabilities: Optional[ModelCapabilities] = None copied_args = dict(kwargs).copy() if "model_capabilities" in kwargs: diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py index b6729a70d11e..8afff868293e 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_openai/config/__init__.py @@ -30,14 +30,14 @@ class BaseOpenAIClientConfiguration(CreateArguments, total=False): api_key: str timeout: Union[float, None] max_retries: int + model_capabilities: ModelCapabilities + """What functionality the model supports, determined by default from the model name but overridden if a value is passed.""" # See OpenAI docs for explanation of these parameters class OpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): organization: str base_url: str - # Not required - model_capabilities: ModelCapabilities class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False): @@ -47,5 +47,6 @@ class AzureOpenAIClientConfiguration(BaseOpenAIClientConfiguration, total=False) api_version: Required[str] azure_ad_token: str azure_ad_token_provider: AsyncAzureADTokenProvider - # Must be provided - model_capabilities: Required[ModelCapabilities] + + +__all__ = ["AzureOpenAIClientConfiguration", "OpenAIClientConfiguration"] diff --git a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py b/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py index a4889f44b415..187dfdace14a 100644 --- a/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py +++ b/python/packages/autogen-ext/src/autogen_ext/models/_reply_chat_completion_client.py @@ -38,50 +38,76 @@ class ReplayChatCompletionClient: .. 
code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - "I'm happy to help with any questions you have.", - "Is there anything else I can assist you with?", - ] - client = ReplayChatCompletionClient(chat_completions) - messages = [LLMMessage(content="What can you do?")] - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" + import asyncio + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + "I'm happy to help with any questions you have.", + "Is there anything else I can assist you with?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + response = await client.create(messages) + print(response.content) # Output: "Hello, how can I assist you today?" + + asyncio.run(example()) Simple streaming chat completion client to return pre-defined responses .. code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - "I'm happy to help with any questions you have.", - "Is there anything else I can assist you with?", - ] - client = ReplayChatCompletionClient(chat_completions) + import asyncio + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + "I'm happy to help with any questions you have.", + "Is there anything else I can assist you with?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + + async for token in client.create_stream(messages): + print(token, end="") # Output: "Hello, how can I assist you today?" - async for token in client.create_stream(messages): - print(token, end="") # Output: "Hello, how can I assist you today?" + async for token in client.create_stream(messages): + print(token, end="") # Output: "I'm happy to help with any questions you have." - async for token in client.create_stream(messages): - print(token, end="") # Output: "I'm happy to help with any questions you have." + asyncio.run(example()) Using `.reset` to reset the chat client state .. code-block:: python - chat_completions = [ - "Hello, how can I assist you today?", - ] - client = ReplayChatCompletionClient(chat_completions) - messages = [LLMMessage(content="What can you do?")] - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" + import asyncio + from autogen_ext.models import ReplayChatCompletionClient + from autogen_core.components.models import UserMessage + + + async def example(): + chat_completions = [ + "Hello, how can I assist you today?", + ] + client = ReplayChatCompletionClient(chat_completions) + messages = [UserMessage(content="What can you do?", source="user")] + response = await client.create(messages) + print(response.content) # Output: "Hello, how can I assist you today?" 
again - response = await client.create(messages) # Raises ValueError("No more mock responses available") - client.reset() # Reset the client state (current index of message and token usages) - response = await client.create(messages) - print(response.content) # Output: "Hello, how can I assist you today?" again + asyncio.run(example()) """ diff --git a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py b/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py deleted file mode 100644 index 4d401fc7ef1f..000000000000 --- a/python/packages/autogen-ext/src/autogen_ext/tools/langchain/__init__.py +++ /dev/null @@ -1,7 +0,0 @@ -import warnings - -from ...tools import LangChainToolAdapter - -warnings.warn("LangChainToolAdapter moved to autogen_ext.tools.LangChainToolAdapter", DeprecationWarning, stacklevel=2) - -__all__ = ["LangChainToolAdapter"] diff --git a/python/packages/autogen-ext/tests/models/test_openai_model_client.py b/python/packages/autogen-ext/tests/models/test_openai_model_client.py index a51e33c0234a..b2dc504abd0e 100644 --- a/python/packages/autogen-ext/tests/models/test_openai_model_client.py +++ b/python/packages/autogen-ext/tests/models/test_openai_model_client.py @@ -1,5 +1,5 @@ import asyncio -from typing import Any, AsyncGenerator, List, Tuple +from typing import Annotated, Any, AsyncGenerator, List, Tuple from unittest.mock import MagicMock import pytest @@ -15,17 +15,25 @@ SystemMessage, UserMessage, ) -from autogen_core.components.tools import FunctionTool +from autogen_core.components.tools import BaseTool, FunctionTool from autogen_ext.models import AzureOpenAIChatCompletionClient, OpenAIChatCompletionClient from autogen_ext.models._openai._model_info import resolve_model -from autogen_ext.models._openai._openai_client import calculate_vision_tokens +from autogen_ext.models._openai._openai_client import calculate_vision_tokens, convert_tools from openai.resources.chat.completions import AsyncCompletions from openai.types.chat.chat_completion import ChatCompletion, Choice from openai.types.chat.chat_completion_chunk import ChatCompletionChunk, ChoiceDelta from openai.types.chat.chat_completion_chunk import Choice as ChunkChoice from openai.types.chat.chat_completion_message import ChatCompletionMessage from openai.types.completion_usage import CompletionUsage -from pydantic import BaseModel +from pydantic import BaseModel, Field + + +class MyResult(BaseModel): + result: str = Field(description="The other description.") + + +class MyArgs(BaseModel): + query: str = Field(description="The description.") class MockChunkDefinition(BaseModel): @@ -133,6 +141,7 @@ async def test_openai_chat_completion_client() -> None: @pytest.mark.asyncio async def test_azure_openai_chat_completion_client() -> None: client = AzureOpenAIChatCompletionClient( + azure_deployment="gpt-4o-1", model="gpt-4o", api_key="api_key", api_version="2020-08-04", @@ -302,3 +311,38 @@ def test_openai_count_image_tokens(mock_size: Tuple[int, int], expected_num_toke # Directly call calculate_vision_tokens and check the result calculated_tokens = calculate_vision_tokens(mock_image, detail="auto") assert calculated_tokens == expected_num_tokens + + +def test_convert_tools_accepts_both_func_tool_and_schema() -> None: + def my_function(arg: str, other: Annotated[int, "int arg"], nonrequired: int = 5) -> MyResult: + return MyResult(result="test") + + tool = FunctionTool(my_function, description="Function tool.") + schema = tool.schema + + converted_tool_schema = 
convert_tools([tool, schema]) + + assert len(converted_tool_schema) == 2 + assert converted_tool_schema[0] == converted_tool_schema[1] + + +def test_convert_tools_accepts_both_tool_and_schema() -> None: + class MyTool(BaseTool[MyArgs, MyResult]): + def __init__(self) -> None: + super().__init__( + args_type=MyArgs, + return_type=MyResult, + name="TestTool", + description="Description of test tool.", + ) + + async def run(self, args: MyArgs, cancellation_token: CancellationToken) -> MyResult: + return MyResult(result="value") + + tool = MyTool() + schema = tool.schema + + converted_tool_schema = convert_tools([tool, schema]) + + assert len(converted_tool_schema) == 2 + assert converted_tool_schema[0] == converted_tool_schema[1] diff --git a/python/packages/autogen-ext/tests/test_openai_assistant_agent.py b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py new file mode 100644 index 000000000000..3cd3e87da3e9 --- /dev/null +++ b/python/packages/autogen-ext/tests/test_openai_assistant_agent.py @@ -0,0 +1,140 @@ +import os +from enum import Enum +from typing import List, Literal, Optional, Union + +import pytest +from autogen_agentchat.messages import TextMessage +from autogen_core.base import CancellationToken +from autogen_core.components.tools._base import BaseTool, Tool +from autogen_ext.agents import OpenAIAssistantAgent +from openai import AsyncAzureOpenAI +from pydantic import BaseModel + + +class QuestionType(str, Enum): + MULTIPLE_CHOICE = "MULTIPLE_CHOICE" + FREE_RESPONSE = "FREE_RESPONSE" + + +class Question(BaseModel): + question_text: str + question_type: QuestionType + choices: Optional[List[str]] = None + + +class DisplayQuizArgs(BaseModel): + title: str + questions: List[Question] + + +class QuizResponses(BaseModel): + responses: List[str] + + +class DisplayQuizTool(BaseTool[DisplayQuizArgs, QuizResponses]): + def __init__(self) -> None: + super().__init__( + args_type=DisplayQuizArgs, + return_type=QuizResponses, + name="display_quiz", + description=( + "Displays a quiz to the student and returns the student's responses. " + "A single quiz can have multiple questions." 
+ ), + ) + + async def run(self, args: DisplayQuizArgs, cancellation_token: CancellationToken) -> QuizResponses: + responses: List[str] = [] + for q in args.questions: + if q.question_type == QuestionType.MULTIPLE_CHOICE: + response = q.choices[0] if q.choices else "" + elif q.question_type == QuestionType.FREE_RESPONSE: + response = "Sample free response" + else: + response = "" + responses.append(response) + return QuizResponses(responses=responses) + + +@pytest.fixture +def client() -> AsyncAzureOpenAI: + azure_endpoint = os.getenv("AZURE_OPENAI_ENDPOINT") + api_version = os.getenv("AZURE_OPENAI_API_VERSION", "2024-08-01-preview") + api_key = os.getenv("AZURE_OPENAI_API_KEY") + + if not all([azure_endpoint, api_key]): + pytest.skip("Azure OpenAI credentials not found in environment variables") + + assert azure_endpoint is not None + assert api_key is not None + return AsyncAzureOpenAI(azure_endpoint=azure_endpoint, api_version=api_version, api_key=api_key) + + +@pytest.fixture +def agent(client: AsyncAzureOpenAI) -> OpenAIAssistantAgent: + tools: List[Union[Literal["code_interpreter", "file_search"], Tool]] = [ + "code_interpreter", + "file_search", + DisplayQuizTool(), + ] + + return OpenAIAssistantAgent( + name="assistant", + instructions="Help the user with their task.", + model="gpt-4o-mini", + description="OpenAI Assistant Agent", + client=client, + tools=tools, + ) + + +@pytest.fixture +def cancellation_token() -> CancellationToken: + return CancellationToken() + + +@pytest.mark.asyncio +async def test_file_retrieval(agent: OpenAIAssistantAgent, cancellation_token: CancellationToken) -> None: + file_path = r"C:\Users\lpinheiro\Github\autogen-test\data\SampleBooks\jungle_book.txt" + await agent.on_upload_for_file_search(file_path, cancellation_token) + + message = TextMessage(source="user", content="What is the first sentence of the jungle scout book?") + response = await agent.on_messages([message], cancellation_token) + + assert response.chat_message.content is not None + assert isinstance(response.chat_message.content, str) + assert len(response.chat_message.content) > 0 + + await agent.delete_uploaded_files(cancellation_token) + await agent.delete_vector_store(cancellation_token) + await agent.delete_assistant(cancellation_token) + + +@pytest.mark.asyncio +async def test_code_interpreter(agent: OpenAIAssistantAgent, cancellation_token: CancellationToken) -> None: + message = TextMessage(source="user", content="I need to solve the equation `3x + 11 = 14`. 
Can you help me?") + response = await agent.on_messages([message], cancellation_token) + + assert response.chat_message.content is not None + assert isinstance(response.chat_message.content, str) + assert len(response.chat_message.content) > 0 + assert "x = 1" in response.chat_message.content.lower() + + await agent.delete_assistant(cancellation_token) + + +@pytest.mark.asyncio +async def test_quiz_creation(agent: OpenAIAssistantAgent, cancellation_token: CancellationToken) -> None: + message = TextMessage( + source="user", + content="Create a short quiz about basic math with one multiple choice question and one free response question.", + ) + response = await agent.on_messages([message], cancellation_token) + + assert response.chat_message.content is not None + assert isinstance(response.chat_message.content, str) + assert len(response.chat_message.content) > 0 + assert isinstance(response.inner_messages, list) + assert any(tool_msg.content for tool_msg in response.inner_messages if hasattr(tool_msg, "content")) + + await agent.delete_assistant(cancellation_token) diff --git a/python/packages/autogen-magentic-one/README.md b/python/packages/autogen-magentic-one/README.md index 123d3580e393..12c8498bf979 100644 --- a/python/packages/autogen-magentic-one/README.md +++ b/python/packages/autogen-magentic-one/README.md @@ -67,22 +67,24 @@ You can install the Magentic-One package and then run the example code to see ho uv sync --all-extras source .venv/bin/activate ``` - Install magentic-one from source: + For Windows, run `.venv\Scripts\activate` to activate the environment. + +2. Install magentic-one from source: ```bash cd packages/autogen-magentic-one pip install -e . ``` + + The following instructions are for running the example code: -The following instructions are for running the example code: - -2. Configure the environment variables for the chat completion client. See instructions below [Environment Configuration for Chat Completion Client](#environment-configuration-for-chat-completion-client). -3. Magentic-One code uses code execution, you need to have [Docker installed](https://docs.docker.com/engine/install/) to run any examples. -4. Magentic-One uses playwright to interact with web pages. You need to install the playwright dependencies. Run the following command to install the playwright dependencies: +3. Configure the environment variables for the chat completion client. See instructions below [Environment Configuration for Chat Completion Client](#environment-configuration-for-chat-completion-client). +4. Magentic-One code uses code execution, you need to have [Docker installed](https://docs.docker.com/engine/install/) to run any examples. +5. Magentic-One uses playwright to interact with web pages. You need to install the playwright dependencies. Run the following command to install the playwright dependencies: ```bash playwright install --with-deps chromium ``` -5. Now you can run the example code to see how the agents work together to accomplish a task. +6. Now you can run the example code to see how the agents work together to accomplish a task. > [!CAUTION] > The example code may download files from the internet, execute code, and interact with web pages. Ensure you are in a safe environment before running the example code. @@ -108,7 +110,7 @@ playwright install --with-deps chromium - hil_mode: (Optional) Enable human-in-the-loop mode (default: disabled) - save_screenshots: (Optional) Save screenshots of browser (default: disabled) -6. 
[Preview] We have a preview API for Magentic-One. You can use the `MagenticOneHelper` class to interact with the system and stream logs. See the [interface README](interface/README.md) for more details. @@ -139,6 +141,12 @@ To configure for Azure OpenAI service, set the following environment variables: } ``` +This project uses Azure OpenAI service with [Entra ID authentication by default](https://learn.microsoft.com/azure/ai-services/openai/how-to/managed-identity). If you run the examples on a local device, you can use the Azure CLI cached credentials for testing: + +Log in to Azure using `az login`, and then run the examples. The account used must have [RBAC permissions](https://learn.microsoft.com/azure/ai-services/openai/how-to/role-based-access-control) like `Azure Cognitive Services OpenAI User` for the OpenAI service; otherwise, you will receive the error: Principal does not have access to API/Operation. + +Note that even if you are the owner of the subscription, you still need to grant the necessary Azure Cognitive Services OpenAI permissions to call the API. + ### With OpenAI To configure for OpenAI, set the following environment variables: diff --git a/python/packages/autogen-studio/README.md b/python/packages/autogen-studio/README.md index 6aa86ce51156..007210350829 100644 --- a/python/packages/autogen-studio/README.md +++ b/python/packages/autogen-studio/README.md @@ -3,11 +3,11 @@ [![PyPI version](https://badge.fury.io/py/autogenstudio.svg)](https://badge.fury.io/py/autogenstudio) [![Downloads](https://static.pepy.tech/badge/autogenstudio/week)](https://pepy.tech/project/autogenstudio) -![ARA](./docs/ara_stockprices.png) +![ARA](./docs/ags_screen.png) AutoGen Studio is an AutoGen-powered AI app (user interface) to help you rapidly prototype AI agents, enhance them with skills, compose them into workflows and interact with them to accomplish tasks. It is built on top of the [AutoGen](https://microsoft.github.io/autogen) framework, which is a toolkit for building AI agents. -Code for AutoGen Studio is on GitHub at [microsoft/autogen](https://github.com/microsoft/autogen/tree/main/samples/apps/autogen-studio) +Code for AutoGen Studio is on GitHub at [microsoft/autogen](https://github.com/microsoft/autogen/tree/main/python/packages/autogen-studio) > **Note**: AutoGen Studio is meant to help you rapidly prototype multi-agent workflows and demonstrate an example of end user interfaces built with AutoGen. It is not meant to be a production-ready app. @@ -16,6 +16,7 @@ Code for AutoGen Studio is on GitHub at [microsoft/autogen](https://github.com/m **Updates** +> Nov 14: AutoGen Studio is being rewritten to use the updated AutoGen 0.4.0 AgentChat api. > April 17: AutoGen Studio database layer is now rewritten to use [SQLModel](https://sqlmodel.tiangolo.com/) (Pydantic + SQLAlchemy). This provides entity linking (skills, models, agents and workflows are linked via association tables) and supports multiple [database backend dialects](https://docs.sqlalchemy.org/en/20/dialects/) supported in SQLAlchemy (SQLite, PostgreSQL, MySQL, Oracle, Microsoft SQL Server). The backend database can be specified using a `--database-uri` argument when running the application. For example, `autogenstudio ui --database-uri sqlite:///database.sqlite` for SQLite and `autogenstudio ui --database-uri postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. > March 12: Default directory for AutoGen Studio is now /home/<user>/.autogenstudio. 
You can also specify this directory using the `--appdir` argument when running the application. For example, `autogenstudio ui --appdir /path/to/folder`. This will store the database and other files in the specified directory e.g. `/path/to/folder/database.sqlite`. `.env` files in that directory will be used to set environment variables for the app. @@ -49,7 +50,7 @@ There are two ways to install AutoGen Studio - from PyPi or from source. We **re pip install -e . ``` - - Navigate to the `samples/apps/autogen-studio/frontend` directory, install dependencies, and build the UI: + - Navigate to the `python/packages/autogen-studio/frontend` directory, install dependencies, and build the UI: ```bash npm install -g gatsby-cli @@ -84,20 +85,28 @@ AutoGen Studio also takes several parameters to customize the application: - `--port <port>` argument to specify the port number. By default, it is set to `8080`. - `--reload` argument to enable auto-reloading of the server when changes are made to the code. By default, it is set to `False`. - `--database-uri` argument to specify the database URI. Example values include `sqlite:///database.sqlite` for SQLite and `postgresql+psycopg://user:password@localhost/dbname` for PostgreSQL. If this is not specified, the database URI defaults to a `database.sqlite` file in the `--appdir` directory. +- `--upgrade-database` argument to upgrade the database schema to the latest version. By default, it is set to `False`. Now that you have AutoGen Studio installed and running, you are ready to explore its capabilities, including defining and modifying agent workflows, interacting with agents and sessions, and expanding agent skills. #### If running from source + When running from source, you need to separately bring up the frontend server. + 1. Open a separate terminal and change directory to the frontend + ```bash cd frontend ``` + 2. Create a `.env.development` file. + ```bash cp .env.default .env.development ``` + 3. Launch the frontend server + ```bash npm run start ``` @@ -107,12 +116,11 @@ npm run start We welcome contributions to AutoGen Studio. We recommend the following general steps to contribute to the project: - Review the overall AutoGen project [contribution guide](https://github.com/microsoft/autogen?tab=readme-ov-file#contributing) -- Please review the AutoGen Studio [roadmap](https://github.com/microsoft/autogen/issues/737) to get a sense of the current priorities for the project. Help is appreciated especially with Studio issues tagged with `help-wanted` +- Please review the AutoGen Studio [roadmap](https://github.com/microsoft/autogen/issues/4006) to get a sense of the current priorities for the project. Help is appreciated especially with Studio issues tagged with `help-wanted` - Please initiate a discussion on the roadmap issue or a new issue to discuss your proposed contribution. - Please review the autogenstudio dev branch here [dev branch](https://github.com/microsoft/autogen/tree/autogenstudio) and use as a base for your contribution. This way, your contribution will be aligned with the latest changes in the AutoGen Studio project. - Submit a pull request with your contribution! - If you are modifying AutoGen Studio, it has its own devcontainer. 
See instructions in `.devcontainer/README.md` to use it -- Please use the tag `studio` for any issues, questions, and PRs related to Studio +- Please use the tag `proj-studio` for any issues, questions, and PRs related to Studio ## FAQ diff --git a/python/packages/autogen-studio/autogenstudio/components/__init__.py b/python/packages/autogen-studio/autogenstudio/components/__init__.py new file mode 100644 index 000000000000..ac3f3116bc2f --- /dev/null +++ b/python/packages/autogen-studio/autogenstudio/components/__init__.py @@ -0,0 +1 @@ +from .agents.userproxy import UserProxyAgent diff --git a/python/packages/autogen-studio/autogenstudio/components/agents/__init__.py b/python/packages/autogen-studio/autogenstudio/components/agents/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/python/packages/autogen-studio/autogenstudio/components/agents/userproxy.py b/python/packages/autogen-studio/autogenstudio/components/agents/userproxy.py new file mode 100644 index 000000000000..cb0fba52e07f --- /dev/null +++ b/python/packages/autogen-studio/autogenstudio/components/agents/userproxy.py @@ -0,0 +1,47 @@ +from typing import Callable, List, Optional, Sequence, Union, Awaitable +from inspect import iscoroutinefunction + +from autogen_agentchat.agents import BaseChatAgent +from autogen_agentchat.base import Response +from autogen_agentchat.messages import ChatMessage, TextMessage +from autogen_core.base import CancellationToken +import asyncio + + +class UserProxyAgent(BaseChatAgent): + """An agent that can represent a human user in a chat.""" + + def __init__( + self, + name: str, + description: Optional[str] = "A human user", + input_func: Optional[Union[Callable[..., str], Callable[..., Awaitable[str]]]] = None + ) -> None: + super().__init__(name, description=description) + self.input_func = input_func or input + self._is_async = iscoroutinefunction(input_func) if input_func else False + + @property + def produced_message_types(self) -> List[type[ChatMessage]]: + return [TextMessage] + + async def _get_input(self, prompt: str) -> str: + """Handle both sync and async input functions""" + if self._is_async: + return await self.input_func(prompt) + else: + return await asyncio.get_event_loop().run_in_executor(None, self.input_func, prompt) + + async def on_messages(self, messages: Sequence[ChatMessage], cancellation_token: CancellationToken) -> Response: + try: + user_input = await self._get_input("Enter your response: ") + return Response(chat_message=TextMessage(content=user_input, source=self.name)) + except Exception as e: + # Surface input failures to the caller rather than swallowing them + raise RuntimeError(f"Failed to get user input: {str(e)}") from e + + async def on_reset(self, cancellation_token: CancellationToken) -> None: + pass
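Since the `input_func` plumbing in this PR exists to feed this agent, a minimal usage sketch may help. It is illustrative only: the `fake_input` coroutine and its canned reply are made up, and in AutoGen Studio the function actually passed in is the WebSocket-backed handler created in `connection.py` further below.

```python
import asyncio

from autogen_core.base import CancellationToken
from autogenstudio.components import UserProxyAgent  # exported by the new __init__.py


async def main() -> None:
    # Stand-in for a real input source (e.g. the per-run WebSocket queue).
    async def fake_input(prompt: str) -> str:
        await asyncio.sleep(0)  # pretend to wait for a human
        return "approve"

    user = UserProxyAgent(name="user", input_func=fake_input)
    response = await user.on_messages([], CancellationToken())
    print(response.chat_message.content)  # -> "approve"


asyncio.run(main())
```

A plain synchronous callable (including the built-in `input`) also works: `iscoroutinefunction` is false for it, so `_get_input` routes it through `run_in_executor` and the event loop is not blocked.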
diff --git a/python/packages/autogen-studio/autogenstudio/database/__init__.py b/python/packages/autogen-studio/autogenstudio/database/__init__.py index ac87c41f0bd7..18e67140acce 100644 --- a/python/packages/autogen-studio/autogenstudio/database/__init__.py +++ b/python/packages/autogen-studio/autogenstudio/database/__init__.py @@ -1,3 +1,3 @@ from .db_manager import DatabaseManager -from .component_factory import ComponentFactory +from .component_factory import ComponentFactory, Component from .config_manager import ConfigurationManager diff --git a/python/packages/autogen-studio/autogenstudio/database/component_factory.py b/python/packages/autogen-studio/autogenstudio/database/component_factory.py index 7c0275275135..bd8fec61a36a 100644 --- a/python/packages/autogen-studio/autogenstudio/database/component_factory.py +++ b/python/packages/autogen-studio/autogenstudio/database/component_factory.py @@ -1,6 +1,6 @@ import os from pathlib import Path -from typing import List, Literal, Union, Optional, Dict, Any, Type +from typing import Callable, List, Literal, Union, Optional, Dict, Any, Type from datetime import datetime import json from autogen_agentchat.task import MaxMessageTermination, TextMentionTermination, StopMessageTermination @@ -13,6 +13,7 @@ TeamTypes, AgentTypes, ModelTypes, ToolTypes, ComponentType, ComponentConfig, ComponentConfigInput, TerminationConfig, TerminationTypes, Response ) +from ..components import UserProxyAgent from autogen_agentchat.agents import AssistantAgent from autogen_agentchat.teams import RoundRobinGroupChat, SelectorGroupChat from autogen_ext.models import OpenAIChatCompletionClient @@ -38,6 +39,17 @@ Component = Union[RoundRobinGroupChat, SelectorGroupChat, AssistantAgent, OpenAIChatCompletionClient, FunctionTool] +DEFAULT_SELECTOR_PROMPT = """You are in a role play game. The following roles are available: +{roles}. +Read the following conversation. Then select the next role from {participants} to play. Only return the role. + +{history} + +Read the above conversation. Then select the next role from {participants} to play. Only return the role. +""" + +CONFIG_RETURN_TYPES = Literal['object', 'dict', 'config'] + class ComponentFactory: """Creates and manages agent components with versioned configuration loading""" @@ -55,19 +67,22 @@ def __init__(self): self._tool_cache: Dict[str, FunctionTool] = {} self._last_cache_clear = datetime.now() - async def load(self, component: ComponentConfigInput, return_type: ReturnType = 'object') -> Union[Component, dict, ComponentConfig]: + async def load( + self, + component: ComponentConfigInput, + input_func: Optional[Callable] = None, + return_type: ReturnType = 'object' + ) -> Union[Component, dict, ComponentConfig]: """ Universal loader for any component type Args: component: Component configuration (file path, dict, or ComponentConfig) + input_func: Optional callable for user input handling return_type: Type of return value ('object', 'dict', or 'config') Returns: Component instance, config dict, or ComponentConfig based on return_type - - Raises: - ValueError: If component type is unknown or version unsupported """ try: # Load and validate config @@ -95,8 +110,8 @@ async def load(self, component: ComponentConfigInput, return_type: ReturnType = # Otherwise create and return component instance handlers = { - ComponentType.TEAM: self.load_team, - ComponentType.AGENT: self.load_agent, + ComponentType.TEAM: lambda c: self.load_team(c, input_func), + ComponentType.AGENT: lambda c: self.load_agent(c, input_func), ComponentType.MODEL: self.load_model, ComponentType.TOOL: self.load_tool, ComponentType.TERMINATION: self.load_termination @@ -113,7 +128,7 @@ async def load(self, component: ComponentConfigInput, return_type: ReturnType = logger.error(f"Failed to load component: {str(e)}") raise - async def load_directory(self, directory: Union[str, Path], check_exists: bool = False, return_type: ReturnType = 'object') -> List[Union[Component, dict, ComponentConfig]]: + async def load_directory(self, directory: Union[str, Path], return_type: ReturnType = 'object') -> List[Union[Component, dict, ComponentConfig]]: """ Import all component configurations from a directory.
""" @@ -124,7 +139,7 @@ async def load_directory(self, directory: Union[str, Path], check_exists: bool = for path in list(directory.glob("*")): if path.suffix.lower().endswith(('.json', '.yaml', '.yml')): try: - component = await self.load(path, return_type) + component = await self.load(path, return_type=return_type) components.append(component) except Exception as e: logger.info( @@ -176,22 +191,17 @@ async def load_termination(self, config: TerminationConfig) -> TerminationCompon raise ValueError( f"Termination condition creation failed: {str(e)}") - async def load_team(self, config: TeamConfig) -> TeamComponent: + async def load_team( + self, + config: TeamConfig, + input_func: Optional[Callable] = None + ) -> TeamComponent: """Create team instance from configuration.""" - - default_selector_prompt = """You are in a role play game. The following roles are available: -{roles}. -Read the following conversation. Then select the next role from {participants} to play. Only return the role. - -{history} - -Read the above conversation. Then select the next role from {participants} to play. Only return the role. -""" try: - # Load participants (agents) + # Load participants (agents) with input_func participants = [] for participant in config.participants: - agent = await self.load(participant) + agent = await self.load(participant, input_func=input_func) participants.append(agent) # Load model client if specified @@ -202,7 +212,6 @@ async def load_team(self, config: TeamConfig) -> TeamComponent: # Load termination condition if specified termination = None if config.termination_condition: - # Now we can use the universal load() method since termination is a proper component termination = await self.load(config.termination_condition) # Create team based on type @@ -215,7 +224,7 @@ async def load_team(self, config: TeamConfig) -> TeamComponent: if not model_client: raise ValueError( "SelectorGroupChat requires a model_client") - selector_prompt = config.selector_prompt if config.selector_prompt else default_selector_prompt + selector_prompt = config.selector_prompt if config.selector_prompt else DEFAULT_SELECTOR_PROMPT return SelectorGroupChat( participants=participants, model_client=model_client, @@ -229,14 +238,20 @@ async def load_team(self, config: TeamConfig) -> TeamComponent: logger.error(f"Failed to create team {config.name}: {str(e)}") raise ValueError(f"Team creation failed: {str(e)}") - async def load_agent(self, config: AgentConfig) -> AgentComponent: + async def load_agent( + self, + config: AgentConfig, + input_func: Optional[Callable] = None + ) -> AgentComponent: """Create agent instance from configuration.""" try: # Load model client if specified model_client = None if config.model_client: model_client = await self.load(config.model_client) + system_message = config.system_message if config.system_message else "You are a helpful assistant" + # Load tools if specified tools = [] if config.tools: @@ -244,9 +259,16 @@ async def load_agent(self, config: AgentConfig) -> AgentComponent: tool = await self.load(tool_config) tools.append(tool) - if config.agent_type == AgentTypes.ASSISTANT: + if config.agent_type == AgentTypes.USERPROXY: + return UserProxyAgent( + name=config.name, + description=config.description or "A human user", + input_func=input_func # Pass through to UserProxyAgent + ) + elif config.agent_type == AgentTypes.ASSISTANT: return AssistantAgent( name=config.name, + description=config.description or "A helpful assistant", model_client=model_client, tools=tools, 
system_message=system_message diff --git a/python/packages/autogen-studio/autogenstudio/database/db_manager.py b/python/packages/autogen-studio/autogenstudio/database/db_manager.py index b1808f245e2f..d5f17492ed3c 100644 --- a/python/packages/autogen-studio/autogenstudio/database/db_manager.py +++ b/python/packages/autogen-studio/autogenstudio/database/db_manager.py @@ -1,3 +1,4 @@ +from pathlib import Path import threading from datetime import datetime from typing import Optional @@ -19,15 +20,34 @@ class DatabaseManager: _init_lock = threading.Lock() - def __init__(self, engine_uri: str, auto_upgrade: bool = True): + def __init__( + self, + engine_uri: str, + base_dir: Optional[Path | str] = None, + auto_upgrade: bool = True + ): + """ + Initialize DatabaseManager with optional custom base directory. + + Args: + engine_uri: Database connection URI + base_dir: Custom base directory for Alembic files. If None, defaults to the schema manager's own module directory + auto_upgrade: Whether to automatically upgrade schema when differences found + """ + # Convert string path to Path object if necessary + if isinstance(base_dir, str): + base_dir = Path(base_dir) + connection_args = { - "check_same_thread": True} if "sqlite" in engine_uri else {} + "check_same_thread": True + } if "sqlite" in engine_uri else {} + self.engine = create_engine(engine_uri, connect_args=connection_args) self.schema_manager = SchemaManager( engine=self.engine, + base_dir=base_dir, auto_upgrade=auto_upgrade, ) - # Check and upgrade on startup upgraded, status = self.schema_manager.check_and_upgrade() if upgraded: diff --git a/python/packages/autogen-studio/autogenstudio/database/schema_manager.py b/python/packages/autogen-studio/autogenstudio/database/schema_manager.py index 450e0a5d76a3..d4d7de42c8e7 100644 --- a/python/packages/autogen-studio/autogenstudio/database/schema_manager.py +++ b/python/packages/autogen-studio/autogenstudio/database/schema_manager.py @@ -10,6 +10,7 @@ from alembic.autogenerate import compare_metadata from sqlalchemy import Engine from sqlmodel import SQLModel +from alembic.util.exc import CommandError class SchemaManager: @@ -29,6 +30,7 @@ class SchemaManager: def __init__( self, engine: Engine, + base_dir: Optional[Path] = None, auto_upgrade: bool = True, init_mode: str = "auto" ): @@ -38,16 +40,45 @@ def __init__( self.engine = engine self.auto_upgrade = auto_upgrade - # Set up paths relative to this file - self.base_dir = Path(__file__).parent + # Use provided base_dir or default to class file location + self.base_dir = base_dir or Path(__file__).parent self.alembic_dir = self.base_dir / 'alembic' self.alembic_ini_path = self.base_dir / 'alembic.ini' - # Handle initialization based on mode - if init_mode == "none": - self._validate_alembic_setup() + # Create base directory if it doesn't exist + self.base_dir.mkdir(parents=True, exist_ok=True) + + # Initialize based on mode + if init_mode == "force": + self._cleanup_existing_alembic() + self._initialize_alembic() + else: + try: + self._validate_alembic_setup() + logger.info("Using existing Alembic configuration") + # Update existing configuration + self._update_configuration() + except FileNotFoundError: + if init_mode == "none": + raise + logger.info("Initializing new Alembic configuration") + self._initialize_alembic() + + def _update_configuration(self) -> None: + """Updates existing Alembic configuration with current settings.""" + logger.info("Updating existing Alembic configuration...") + + # Update alembic.ini + config_content =
self._generate_alembic_ini_content() + with open(self.alembic_ini_path, 'w') as f: + f.write(config_content) + + # Update env.py + env_path = self.alembic_dir / 'env.py' + if env_path.exists(): + self._update_env_py(env_path) else: - self._ensure_alembic_setup(force=init_mode == "force") + self._create_minimal_env_py(env_path) def _cleanup_existing_alembic(self) -> None: """ @@ -106,43 +137,34 @@ def _ensure_alembic_setup(self, *, force: bool = False) -> None: self._initialize_alembic() logger.info("Alembic initialization complete") - def _initialize_alembic(self) -> str: - """Initializes Alembic configuration in the local directory.""" + def _initialize_alembic(self) -> None: logger.info("Initializing Alembic configuration...") - # Check if versions exists - has_versions = (self.alembic_dir / 'versions').exists() - logger.info(f"Existing versions directory found: {has_versions}") - - # Create base directories + # Create directories first self.alembic_dir.mkdir(exist_ok=True) - if not has_versions: - (self.alembic_dir / 'versions').mkdir(exist_ok=True) + versions_dir = self.alembic_dir / 'versions' + versions_dir.mkdir(exist_ok=True) + + # Create env.py BEFORE running command.init + env_path = self.alembic_dir / 'env.py' + if not env_path.exists(): + self._create_minimal_env_py(env_path) + logger.info("Created new env.py") # Write alembic.ini - ini_content = self._generate_alembic_ini_content() + config_content = self._generate_alembic_ini_content() with open(self.alembic_ini_path, 'w') as f: - f.write(ini_content) + f.write(config_content) logger.info("Created alembic.ini") - if not has_versions: - # Only run init if no versions directory + # Now run alembic init + try: config = self.get_alembic_config() command.init(config, str(self.alembic_dir)) - logger.info("Initialized new Alembic directory structure") - else: - # Create minimal env.py if it doesn't exist - env_path = self.alembic_dir / 'env.py' - if not env_path.exists(): - self._create_minimal_env_py(env_path) - logger.info("Created minimal env.py") - else: - # Update existing env.py - self._update_env_py(env_path) - logger.info("Updated existing env.py") - - logger.info(f"Alembic setup completed at {self.base_dir}") - return str(self.alembic_ini_path) + logger.info("Initialized Alembic directory structure") + except CommandError as e: + if "already exists" not in str(e): + raise def _create_minimal_env_py(self, env_path: Path) -> None: """Creates a minimal env.py file for Alembic.""" @@ -242,6 +264,9 @@ def _update_env_py(self, env_path: Path) -> None: """ Updates the env.py file to use SQLModel metadata. """ + if not env_path.exists(): + self._create_minimal_env_py(env_path) + return try: with open(env_path, 'r') as f: content = f.read() @@ -297,8 +322,17 @@ def _ensure_alembic_setup(self, *, force: bool = False) -> None: def _validate_alembic_setup(self) -> None: """Validates that Alembic is properly configured.""" - if not self.alembic_ini_path.exists(): - raise FileNotFoundError("Alembic configuration not found") + required_files = [ + self.alembic_ini_path, + self.alembic_dir / 'env.py', + self.alembic_dir / 'versions' + ] + + missing = [f for f in required_files if not f.exists()] + if missing: + raise FileNotFoundError( + f"Alembic configuration incomplete. 
Missing: {', '.join(str(f) for f in missing)}" + ) def get_alembic_config(self) -> Config: """ diff --git a/python/packages/autogen-studio/autogenstudio/datamodel/types.py b/python/packages/autogen-studio/autogenstudio/datamodel/types.py index ad8f2088ea3f..5fec5d984bec 100644 --- a/python/packages/autogen-studio/autogenstudio/datamodel/types.py +++ b/python/packages/autogen-studio/autogenstudio/datamodel/types.py @@ -17,7 +17,7 @@ class ToolTypes(str, Enum): class AgentTypes(str, Enum): ASSISTANT = "AssistantAgent" - CODING = "CodingAssistantAgent" + USERPROXY = "UserProxyAgent" class TeamTypes(str, Enum): diff --git a/python/packages/autogen-studio/autogenstudio/teammanager.py b/python/packages/autogen-studio/autogenstudio/teammanager.py index e50f740472d6..ebe4302c5f58 100644 --- a/python/packages/autogen-studio/autogenstudio/teammanager.py +++ b/python/packages/autogen-studio/autogenstudio/teammanager.py @@ -1,8 +1,8 @@ -from typing import AsyncGenerator, Union, Optional +from typing import AsyncGenerator, Callable, Union, Optional import time -from .database import ComponentFactory +from .database import ComponentFactory, Component from .datamodel import TeamResult, TaskResult, ComponentConfigInput -from autogen_agentchat.messages import InnerMessage, ChatMessage +from autogen_agentchat.messages import ChatMessage, AgentMessage from autogen_core.base import CancellationToken @@ -10,19 +10,37 @@ class TeamManager: def __init__(self) -> None: self.component_factory = ComponentFactory() + async def _create_team( + self, + team_config: ComponentConfigInput, + input_func: Optional[Callable] = None + ) -> Component: + """Create team instance with common setup logic""" + return await self.component_factory.load( + team_config, + input_func=input_func + ) + + def _create_result(self, task_result: TaskResult, start_time: float) -> TeamResult: + """Create TeamResult with timing info""" + return TeamResult( + task_result=task_result, + usage="", + duration=time.time() - start_time + ) + async def run_stream( self, task: str, team_config: ComponentConfigInput, + input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None - ) -> AsyncGenerator[Union[InnerMessage, ChatMessage, TaskResult], None]: + ) -> AsyncGenerator[Union[AgentMessage, ChatMessage, TaskResult], None]: """Stream the team's execution results""" start_time = time.time() try: - # Let factory handle all config processing - team = await self.component_factory.load(team_config) - + team = await self._create_team(team_config, input_func) stream = team.run_stream( task=task, cancellation_token=cancellation_token @@ -33,11 +51,7 @@ async def run_stream( break if isinstance(message, TaskResult): - yield TeamResult( - task_result=message, - usage="", - duration=time.time() - start_time - ) + yield self._create_result(message, start_time) else: yield message @@ -48,20 +62,16 @@ async def run( self, task: str, team_config: ComponentConfigInput, + input_func: Optional[Callable] = None, cancellation_token: Optional[CancellationToken] = None ) -> TeamResult: """Original non-streaming run method with optional cancellation""" start_time = time.time() - # Let factory handle all config processing - team = await self.component_factory.load(team_config) + team = await self._create_team(team_config, input_func) result = await team.run( task=task, cancellation_token=cancellation_token ) - return TeamResult( - task_result=result, - usage="", - duration=time.time() - start_time - ) + return self._create_result(result, 
start_time) diff --git a/python/packages/autogen-studio/autogenstudio/version.py b/python/packages/autogen-studio/autogenstudio/version.py index bf51c1b62b6e..a6f047da1cb8 100644 --- a/python/packages/autogen-studio/autogenstudio/version.py +++ b/python/packages/autogen-studio/autogenstudio/version.py @@ -1,3 +1,3 @@ -VERSION = "0.1.6" +VERSION = "0.4.0.dev37" __version__ = VERSION APP_NAME = "autogenstudio" diff --git a/python/packages/autogen-studio/autogenstudio/web/app.py b/python/packages/autogen-studio/autogenstudio/web/app.py index 8a0c2ce19ab4..8d62cf1adffb 100644 --- a/python/packages/autogen-studio/autogenstudio/web/app.py +++ b/python/packages/autogen-studio/autogenstudio/web/app.py @@ -34,7 +34,7 @@ async def lifespan(app: FastAPI) -> AsyncGenerator[None, None]: logger.info("Initializing application...") try: # Initialize managers (DB, Connection, Team) - await init_managers(initializer.database_uri, initializer.config_dir) + await init_managers(initializer.database_uri, initializer.config_dir, initializer.app_root) logger.info("Managers initialized successfully") # Any other initialization code diff --git a/python/packages/autogen-studio/autogenstudio/web/deps.py b/python/packages/autogen-studio/autogenstudio/web/deps.py index b4c08e952aeb..1767c004eee0 100644 --- a/python/packages/autogen-studio/autogenstudio/web/deps.py +++ b/python/packages/autogen-studio/autogenstudio/web/deps.py @@ -86,7 +86,7 @@ async def get_current_user( # Manager initialization and cleanup -async def init_managers(database_uri: str, config_dir: str) -> None: +async def init_managers(database_uri: str, config_dir: str, app_root: str) -> None: """Initialize all manager instances""" global _db_manager, _websocket_manager, _team_manager @@ -95,7 +95,7 @@ async def init_managers(database_uri: str, config_dir: str) -> None: try: # Initialize database manager _db_manager = DatabaseManager( - engine_uri=database_uri, auto_upgrade=settings.UPGRADE_DATABASE) + engine_uri=database_uri, auto_upgrade=settings.UPGRADE_DATABASE, base_dir=app_root) _db_manager.create_db_and_tables() # init default team config diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/__init__.py b/python/packages/autogen-studio/autogenstudio/web/managers/__init__.py index e69de29bb2d1..6a77bed8fec1 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/__init__.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/__init__.py @@ -0,0 +1 @@ +from .connection import WebSocketManager diff --git a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py index 0a0d5fb7cb4b..5ed613cbb837 100644 --- a/python/packages/autogen-studio/autogenstudio/web/managers/connection.py +++ b/python/packages/autogen-studio/autogenstudio/web/managers/connection.py @@ -1,13 +1,15 @@ +import asyncio from autogen_agentchat.base._task import TaskResult from fastapi import WebSocket, WebSocketDisconnect -from typing import Dict, Optional, Any +from typing import Callable, Dict, Optional, Any from uuid import UUID import logging from datetime import datetime, timezone from ...datamodel import Run, RunStatus, TeamResult from ...database import DatabaseManager -from autogen_agentchat.messages import InnerMessage, ChatMessage, TextMessage +from ...teammanager import TeamManager +from autogen_agentchat.messages import AgentMessage, ChatMessage, TextMessage from autogen_core.base import CancellationToken logger = 
logging.getLogger(__name__) @@ -22,25 +24,22 @@ def __init__(self, db_manager: DatabaseManager): self._cancellation_tokens: Dict[UUID, CancellationToken] = {} # Track explicitly closed connections self._closed_connections: set[UUID] = set() + self._input_responses: Dict[UUID, asyncio.Queue] = {} self._cancel_message = TeamResult(task_result=TaskResult(messages=[TextMessage( source="user", content="Run cancelled by user")], stop_reason="cancelled by user"), usage="", duration=0).model_dump() - async def connect(self, websocket: WebSocket, run_id: UUID) -> bool: - """Initialize WebSocket connection for a run - - Args: - websocket: The WebSocket connection to initialize - run_id: UUID of the run to associate with this connection + def _get_stop_message(self, reason: str) -> dict: + return TeamResult(task_result=TaskResult(messages=[TextMessage( + source="user", content=reason)], stop_reason=reason), usage="", duration=0).model_dump() - Returns: - bool: True if connection was successful, False otherwise - """ + async def connect(self, websocket: WebSocket, run_id: UUID) -> bool: try: await websocket.accept() self._connections[run_id] = websocket - # Remove from closed set if reconnecting self._closed_connections.discard(run_id) + # Initialize input queue for this connection + self._input_responses[run_id] = asyncio.Queue() run = await self._get_run(run_id) if run: @@ -54,7 +53,6 @@ async def connect(self, websocket: WebSocket, run_id: UUID) -> bool: }) return True - except Exception as e: logger.error(f"Connection error for run {run_id}: {e}") return False @@ -62,18 +60,10 @@ async def connect(self, websocket: WebSocket, run_id: UUID) -> bool: async def start_stream( self, run_id: UUID, - team_manager: Any, + team_manager: TeamManager, task: str, team_config: dict ) -> None: - """Start streaming task execution with improved error handling - - Args: - run_id: UUID of the run - team_manager: Instance of the team manager - task: Task string to execute - team_config: Team configuration dictionary - """ if run_id not in self._connections or run_id in self._closed_connections: raise ValueError(f"No active connection for run {run_id}") @@ -81,9 +71,13 @@ async def start_stream( self._cancellation_tokens[run_id] = cancellation_token try: + # Create input function for this run + input_func = self.create_input_func(run_id) + async for message in team_manager.run_stream( task=task, team_config=team_config, + input_func=input_func, # Pass the input function cancellation_token=cancellation_token ): if cancellation_token.is_cancelled() or run_id in self._closed_connections: @@ -113,7 +107,44 @@ async def start_stream( finally: self._cancellation_tokens.pop(run_id, None) - async def stop_run(self, run_id: UUID) -> None: + def create_input_func(self, run_id: UUID) -> Callable: + """Creates an input function for a specific run""" + async def input_handler(prompt: str = "") -> str: + try: + + # Send input request to client + await self._send_message(run_id, { + "type": "input_request", + "prompt": prompt, + "data": { + "source": "system", + "content": prompt + }, + "timestamp": datetime.now(timezone.utc).isoformat() + }) + + # Wait for response + if run_id in self._input_responses: + response = await self._input_responses[run_id].get() + return response + else: + raise ValueError(f"No input queue for run {run_id}") + + except Exception as e: + logger.error(f"Error handling input for run {run_id}: {e}") + raise + + return input_handler + + async def handle_input_response(self, run_id: UUID, response: str) -> 
None: + """Handle input response from client""" + if run_id in self._input_responses: + await self._input_responses[run_id].put(response) + else: + logger.warning( + f"Received input response for inactive run {run_id}") + + async def stop_run(self, run_id: UUID, reason: str) -> None: """Stop a running task""" if run_id in self._cancellation_tokens: logger.info(f"Stopping run {run_id}") @@ -125,7 +156,7 @@ async def stop_run(self, run_id: UUID) -> None: await self._send_message(run_id, { "type": "completion", "status": "cancelled", - "data": self._cancel_message, + "data": self._get_stop_message(reason), "timestamp": datetime.now(timezone.utc).isoformat() }) except Exception: @@ -139,11 +170,12 @@ async def disconnect(self, run_id: UUID) -> None: self._closed_connections.add(run_id) # Cancel any running tasks - await self.stop_run(run_id) + await self.stop_run(run_id, "Connection closed") # Clean up resources self._connections.pop(run_id, None) self._cancellation_tokens.pop(run_id, None) + self._input_responses.pop(run_id, None) async def _send_message(self, run_id: UUID, message: dict) -> None: """Send a message through the WebSocket with connection state checking @@ -203,7 +235,7 @@ def _format_message(self, message: Any) -> Optional[dict]: Optional[dict]: Formatted message or None if formatting fails """ try: - if isinstance(message, (InnerMessage, ChatMessage)): + if isinstance(message, (AgentMessage, ChatMessage)): return { "type": "message", "data": message.model_dump() @@ -251,6 +283,41 @@ async def _update_run_status( run.error_message = error self.db_manager.upsert(run) + async def cleanup(self) -> None: + """Clean up all active connections and resources when server is shutting down""" + logger.info( + f"Cleaning up {len(self.active_connections)} active connections") + + try: + # First cancel all running tasks + for run_id in self.active_runs.copy(): + if run_id in self._cancellation_tokens: + self._cancellation_tokens[run_id].cancel() + + # Then disconnect all websockets with timeout + # 10 second timeout for entire cleanup + async with asyncio.timeout(10): + for run_id in self.active_connections.copy(): + try: + # Give each disconnect operation 2 seconds + async with asyncio.timeout(2): + await self.disconnect(run_id) + except asyncio.TimeoutError: + logger.warning(f"Timeout disconnecting run {run_id}") + except Exception as e: + logger.error(f"Error disconnecting run {run_id}: {e}") + + except asyncio.TimeoutError: + logger.warning("WebSocketManager cleanup timed out") + except Exception as e: + logger.error(f"Error during WebSocketManager cleanup: {e}") + finally: + # Always clear internal state, even if cleanup had errors + self._connections.clear() + self._cancellation_tokens.clear() + self._closed_connections.clear() + self._input_responses.clear() + @property def active_connections(self) -> set[UUID]: """Get set of active run IDs"""
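The manager above defines a small JSON protocol over the run WebSocket: the server pushes `message`, `input_request`, and `result`/`completion` frames, and the client answers an `input_request` with an `input_response` (or sends `stop`). A hedged client-side sketch of that protocol, assuming the third-party `websockets` package and a locally running server; the host, port, and run id are placeholders, and creating/starting the run via the REST API is omitted:

```python
# Illustrative client for the run WebSocket protocol above; not part of the repo.
import asyncio
import json

import websockets  # third-party: pip install websockets


async def drive_run(run_id: str) -> None:
    uri = f"ws://localhost:8081/api/ws/runs/{run_id}"  # placeholder host/port
    async with websockets.connect(uri) as ws:
        async for raw in ws:
            msg = json.loads(raw)
            if msg["type"] == "input_request":
                # Answer the UserProxyAgent prompt relayed by the server.
                # Blocking input() is fine for a toy console client.
                reply = input(msg.get("prompt", "") + " ")
                await ws.send(json.dumps({"type": "input_response", "response": reply}))
            elif msg["type"] in ("result", "completion"):
                print("run finished:", msg.get("status"))
                break
            else:
                print("event:", msg.get("type"))


asyncio.run(drive_run("00000000-0000-0000-0000-000000000000"))
```

Note that if the client never answers, the frontend's three-minute input timeout (see `TIMEOUT_CONFIG` later in this diff) sends a `stop` on its behalf.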
diff --git a/python/packages/autogen-studio/autogenstudio/web/routes/ws.py b/python/packages/autogen-studio/autogenstudio/web/routes/ws.py index 8fd6844ff3ad..75152036fc29 100644 --- a/python/packages/autogen-studio/autogenstudio/web/routes/ws.py +++ b/python/packages/autogen-studio/autogenstudio/web/routes/ws.py @@ -8,6 +8,7 @@ from ..deps import get_websocket_manager, get_db, get_team_manager from ...datamodel import Run, RunStatus +from ..managers import WebSocketManager router = APIRouter() logger = logging.getLogger(__name__) @@ -17,7 +18,7 @@ async def run_websocket( websocket: WebSocket, run_id: UUID, - ws_manager=Depends(get_websocket_manager), + ws_manager: WebSocketManager = Depends(get_websocket_manager), db=Depends(get_db), team_manager=Depends(get_team_manager) ): @@ -48,8 +49,10 @@ async def run_websocket( message = json.loads(raw_message) if message.get("type") == "stop": - logger.info(f"Received stop request for run {run_id}") - await ws_manager.stop_run(run_id) + logger.info(f"Received stop request for run {run_id}") + reason = message.get( + "reason") or "User requested stop/cancellation" + await ws_manager.stop_run(run_id, reason=reason) break elif message.get("type") == "ping": @@ -58,6 +61,15 @@ "timestamp": datetime.utcnow().isoformat() }) + elif message.get("type") == "input_response": + # Handle input response from client + response = message.get("response") + if response is not None: + await ws_manager.handle_input_response(run_id, response) + else: + logger.warning( + f"Invalid input response format for run {run_id}") + except json.JSONDecodeError: logger.warning(f"Invalid JSON received: {raw_message}") await websocket.send_json({ diff --git a/python/packages/autogen-studio/docs/ags_screen.png b/python/packages/autogen-studio/docs/ags_screen.png new file mode 100644 index 000000000000..30982cf6a828 --- /dev/null +++ b/python/packages/autogen-studio/docs/ags_screen.png @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aa05d0e55262549880e3e06ca3839cffdbcd0836bd2b98690185255f03dc98ac +size 194702 diff --git a/python/packages/autogen-studio/docs/ara_stockprices.png b/python/packages/autogen-studio/docs/ara_stockprices.png deleted file mode 100644 index f5adf6256e55..000000000000 --- a/python/packages/autogen-studio/docs/ara_stockprices.png +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:5e3340a765da6dff6585c8b2e8a4014df0c94b537d62d341d2d0d45627bbc345 -size 198222 diff --git a/python/packages/autogen-studio/frontend/package.json b/python/packages/autogen-studio/frontend/package.json index 0123df464575..f205a7a1e44d 100644 --- a/python/packages/autogen-studio/frontend/package.json +++ b/python/packages/autogen-studio/frontend/package.json @@ -17,29 +17,29 @@ "typecheck": "tsc --noEmit" }, "dependencies": { - "@ant-design/charts": "^2.2.1", + "@ant-design/charts": "^2.2.3", "@ant-design/plots": "^2.2.2", "@dagrejs/dagre": "^1.1.4", - "@headlessui/react": "^1.7.16", + "@headlessui/react": "^2.2.0", "@heroicons/react": "^2.0.18", "@mdx-js/react": "^3.1.0", "@monaco-editor/react": "^4.6.0", "@tailwindcss/typography": "^0.5.9", "@xyflow/react": "^12.3.5", - "antd": "^5.1.0", + "antd": "^5.22.1", "autoprefixer": "^10.4.20", - "gatsby": "^5.13.7", - "gatsby-plugin-image": "^3.13.1", - "gatsby-plugin-manifest": "^5.13.1", - "gatsby-plugin-mdx": "^5.13.1", - "gatsby-plugin-postcss": "^6.13.1", - "gatsby-plugin-sharp": "^5.13.1", - "gatsby-plugin-sitemap": "^6.13.1", - "gatsby-source-filesystem": "^5.13.1", - "gatsby-transformer-sharp": "^5.13.1", + "gatsby": "^5.14.0", + "gatsby-plugin-image": "^3.14.0", + "gatsby-plugin-manifest": "^5.14.0", + "gatsby-plugin-mdx": "^5.14.0", + "gatsby-plugin-postcss": "^6.14.0", + "gatsby-plugin-sharp": "^5.14.0", + "gatsby-plugin-sitemap": "^6.14.0", + "gatsby-source-filesystem": "^5.14.0", + "gatsby-transformer-sharp": "^5.14.0", "install": "^0.13.0", - "lucide-react": "^0.454.0", - "postcss": "^8.4.47", + "lucide-react": "^0.456.0", + "postcss": "^8.4.49", "react": "^18.2.0", "react-dom": "^18.2.0", "react-markdown": "^9.0.1", @@ -49,7 +49,7 @@ }, "devDependencies": {
"@types/lodash.debounce": "^4.0.9", - "@types/node": "^20.11.19", + "@types/node": "^22.9.0", "@types/react": "^18.2.55", "@types/react-dom": "^18.2.19", "@types/react-syntax-highlighter": "^15.5.10", diff --git a/python/packages/autogen-studio/frontend/src/components/contentheader.tsx b/python/packages/autogen-studio/frontend/src/components/contentheader.tsx index b66bc0d7da27..0309b51dbc89 100644 --- a/python/packages/autogen-studio/frontend/src/components/contentheader.tsx +++ b/python/packages/autogen-studio/frontend/src/components/contentheader.tsx @@ -15,9 +15,9 @@ import { import { Tooltip } from "antd"; import { appContext } from "../hooks/provider"; import { useConfigStore } from "../hooks/store"; +import { Link } from "gatsby"; type ContentHeaderProps = { - title?: string; onMobileMenuToggle: () => void; isMobileMenuOpen: boolean; }; @@ -27,13 +27,13 @@ const classNames = (...classes: (string | undefined | boolean)[]) => { }; const ContentHeader = ({ - title, onMobileMenuToggle, isMobileMenuOpen, }: ContentHeaderProps) => { const { darkMode, setDarkMode, user, logout } = React.useContext(appContext); - const { sidebar, setSidebarState } = useConfigStore(); + const { sidebar, setSidebarState, header } = useConfigStore(); const { isExpanded } = sidebar; + const { title, breadcrumbs } = header; return (
@@ -68,24 +68,63 @@ const ContentHeader = ({
- {/* Search */} -
-
+ {/* Breadcrumbs */} +
+ {breadcrumbs && breadcrumbs.length > 0 ? ( + + ) : ( +

{title}

+ )} +
+ + {/* Right side header items */} +
+ {/* Search */} + - + -
- {/* Right side header items */} -
{/* Dark Mode Toggle */}
+
+ ); +}; diff --git a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chat.tsx b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chat.tsx index cdb0ac66776e..473c90b4f98c 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chat.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chat.tsx @@ -3,15 +3,18 @@ import { message } from "antd"; import { getServerUrl } from "../../../utils"; import { SessionManager } from "../../shared/session/manager"; import { IStatus } from "../../../types/app"; -import { Message } from "../../../types/datamodel"; +import { + Message, + ThreadStatus, + WebSocketMessage, +} from "../../../types/datamodel"; import { useConfigStore } from "../../../../hooks/store"; import { appContext } from "../../../../hooks/provider"; import ChatInput from "./chatinput"; -import { ModelUsage, SocketMessage, ThreadState, ThreadStatus } from "./types"; +import { ModelUsage, ThreadState, TIMEOUT_CONFIG } from "./types"; import { MessageList } from "./messagelist"; import TeamManager from "../../shared/team/manager"; import { teamAPI } from "../../shared/team/api"; -import AgentFlow from "./agentflow/agentflow"; const logo = require("../../../../images/landing/welcome.svg").default; @@ -31,12 +34,14 @@ export default function ChatView({ Record >({}); const chatContainerRef = React.useRef(null); + const timeoutRefs = React.useRef>({}); const { user } = React.useContext(appContext); const { session, sessions } = useConfigStore(); const [activeSockets, setActiveSockets] = React.useState< Record >({}); + const activeSocketsRef = React.useRef>({}); const [teamConfig, setTeamConfig] = React.useState(null); @@ -58,12 +63,114 @@ export default function ChatView({ } }, [session]); + const updateSocket = (runId: string, socket: WebSocket | null) => { + if (socket) { + activeSocketsRef.current[runId] = socket; + setActiveSockets((prev) => ({ ...prev, [runId]: socket })); + } else { + delete activeSocketsRef.current[runId]; + setActiveSockets((prev) => { + const next = { ...prev }; + delete next[runId]; + return next; + }); + } + }; + React.useEffect(() => { return () => { Object.values(activeSockets).forEach((socket) => socket.close()); }; }, [activeSockets]); + const handleTimeoutForRun = (runId: string) => { + const socket = activeSocketsRef.current[runId]; + if (socket && socket.readyState === WebSocket.OPEN) { + // Send stop message to backend, just like when user clicks stop + socket.send( + JSON.stringify({ + type: "stop", + reason: TIMEOUT_CONFIG.DEFAULT_MESSAGE, + }) + ); + } + + // Update thread state with timeout reason + setThreadMessages((prev) => { + const currentThread = prev[runId]; + if (!currentThread) return prev; + + return { + ...prev, + [runId]: { + ...currentThread, + status: "cancelled", // Use existing cancelled status + reason: "Input request timed out after 3 minutes", + isExpanded: true, + inputRequest: currentThread.inputRequest + ? 
{ + prompt: currentThread.inputRequest.prompt, + isPending: true, + } + : undefined, + }, + }; + }); + + if (timeoutRefs.current[runId]) { + clearTimeout(timeoutRefs.current[runId]); + delete timeoutRefs.current[runId]; + } + }; + + const handleInputResponse = async (runId: string, response: string) => { + // Clear timeout when response is received + if (timeoutRefs.current[runId]) { + clearTimeout(timeoutRefs.current[runId]); + delete timeoutRefs.current[runId]; + } + + if (response === "TIMEOUT") { + handleTimeoutForRun(runId); + return; + } + + const socket = activeSockets[runId]; + if (socket && socket.readyState === WebSocket.OPEN) { + try { + socket.send( + JSON.stringify({ + type: "input_response", + response: response, + }) + ); + + setThreadMessages((prev) => ({ + ...prev, + [runId]: { + ...prev[runId], + status: "streaming", + inputRequest: undefined, + }, + })); + } catch (error) { + console.error("Error sending input response:", error); + message.error("Failed to send response"); + + setThreadMessages((prev) => ({ + ...prev, + [runId]: { + ...prev[runId], + status: "error", + reason: "Failed to send input response", + }, + })); + } + } else { + message.error("Connection lost. Please try again."); + } + }; + const getBaseUrl = (url: string): string => { try { // Remove protocol (http:// or https://) @@ -91,7 +198,7 @@ export default function ChatView({ const createRun = async (sessionId: number): Promise => { const payload = { session_id: sessionId, user_id: user?.email || "" }; - const response = await fetch(`${serverUrl}/runs`, { + const response = await fetch(`${serverUrl}/runs/`, { method: "POST", headers: { "Content-Type": "application/json" }, body: JSON.stringify(payload), @@ -128,40 +235,32 @@ export default function ChatView({ return await response.json(); }; - interface RequestUsage { - prompt_tokens: number; - completion_tokens: number; - } - const connectWebSocket = (runId: string, query: string) => { const baseUrl = getBaseUrl(serverUrl); - // Determine if we should use ws:// or wss:// based on current protocol const wsProtocol = window.location.protocol === "https:" ? 
"wss:" : "ws:"; const wsUrl = `${wsProtocol}//${baseUrl}/api/ws/runs/${runId}`; - console.log("Connecting to WebSocket URL:", wsUrl); // For debugging - const socket = new WebSocket(wsUrl); let isClosing = false; + const clearTimeoutForRun = () => { + if (timeoutRefs.current[runId]) { + clearTimeout(timeoutRefs.current[runId]); + delete timeoutRefs.current[runId]; + } + }; + const closeSocket = () => { if (!isClosing && socket.readyState !== WebSocket.CLOSED) { isClosing = true; socket.close(); - setActiveSockets((prev) => { - const newSockets = { ...prev }; - delete newSockets[runId]; - return newSockets; - }); + updateSocket(runId, null); } }; socket.onopen = async () => { try { - setActiveSockets((prev) => ({ - ...prev, - [runId]: socket, - })); + updateSocket(runId, socket); setThreadMessages((prev) => ({ ...prev, @@ -187,13 +286,9 @@ export default function ChatView({ }) ); - // Start the run only after socket is connected await startRun(runId, query); } catch (error) { - console.error("Error starting run:", error); - message.error("Failed to start run"); closeSocket(); - setThreadMessages((prev) => ({ ...prev, [runId]: { @@ -206,11 +301,32 @@ export default function ChatView({ }; socket.onmessage = (event) => { - const message: SocketMessage = JSON.parse(event.data); - // console.log("WebSocket message received:", message); + const message: WebSocketMessage = JSON.parse(event.data); switch (message.type) { + case "input_request": + clearTimeoutForRun(); + + timeoutRefs.current[runId] = setTimeout(() => { + handleTimeoutForRun(runId); + }, TIMEOUT_CONFIG.DURATION_MS); + + setThreadMessages((prev) => ({ + ...prev, + [runId]: { + ...prev[runId], + status: "awaiting_input", + inputRequest: { + prompt: message.data?.content || "", + isPending: false, + }, + }, + })); + break; + case "message": + clearTimeoutForRun(); + setThreadMessages((prev) => { const currentThread = prev[runId] || { messages: [], @@ -227,17 +343,18 @@ export default function ChatView({ } : undefined; - const newMessage = { - source: message.data?.source || "", - content: message.data?.content || "", - models_usage, - }; - return { ...prev, [runId]: { ...currentThread, - messages: [...currentThread.messages, newMessage], + messages: [ + ...currentThread.messages, + { + source: message.data?.source || "", + content: message.data?.content || "", + models_usage, + }, + ], status: "streaming", }, }; @@ -246,30 +363,27 @@ export default function ChatView({ case "result": case "completion": + clearTimeoutForRun(); + setThreadMessages((prev) => { const currentThread = prev[runId]; if (!currentThread) return prev; - const finalMessage = message.data?.task_result?.messages - ?.filter((msg: any) => msg.content !== "TERMINATE") - .pop(); - const status: ThreadStatus = message.status || "complete"; - // Capture completion reason from task_result const reason = message.data?.task_result?.stop_reason || (message.error ? `Error: ${message.error}` : undefined); - console.log("All Messages", currentThread.messages); return { ...prev, [runId]: { ...currentThread, - status: status, - reason: reason, + status, + reason, isExpanded: true, - finalResult: finalMessage, - messages: currentThread.messages, + finalResult: message.data?.task_result?.messages + ?.filter((msg: any) => msg.content !== "TERMINATE") + .pop(), }, }; }); @@ -279,16 +393,10 @@ export default function ChatView({ }; socket.onclose = (event) => { - console.log( - `WebSocket closed for run ${runId}. 
Code: ${event.code}, Reason: ${event.reason}` - ); + clearTimeoutForRun(); if (!isClosing) { - setActiveSockets((prev) => { - const newSockets = { ...prev }; - delete newSockets[runId]; - return newSockets; - }); + updateSocket(runId, null); setThreadMessages((prev) => { const thread = prev[runId]; @@ -297,7 +405,10 @@ export default function ChatView({ ...prev, [runId]: { ...thread, - status: "complete", + status: + event.code === TIMEOUT_CONFIG.WEBSOCKET_CODE + ? "timeout" + : "complete", reason: event.reason || "Connection closed", }, }; @@ -308,8 +419,7 @@ export default function ChatView({ }; socket.onerror = (error) => { - console.error("WebSocket error:", error); - message.error("WebSocket connection error"); + clearTimeoutForRun(); setThreadMessages((prev) => { const thread = prev[runId]; @@ -335,7 +445,9 @@ export default function ChatView({ const cancelRun = async (runId: string) => { const socket = activeSockets[runId]; if (socket && socket.readyState === WebSocket.OPEN) { - socket.send(JSON.stringify({ type: "stop" })); + socket.send( + JSON.stringify({ type: "stop", reason: "Cancelled by user" }) + ); setThreadMessages((prev) => ({ ...prev, @@ -349,6 +461,16 @@ export default function ChatView({ } }; + // Clean up timeouts when component unmounts + React.useEffect(() => { + return () => { + Object.entries(timeoutRefs.current).forEach(([_, timeout]) => + clearTimeout(timeout) + ); + timeoutRefs.current = {}; + }; + }, []); + const runTask = async (query: string) => { setError(null); setLoading(true); @@ -429,7 +551,7 @@ export default function ChatView({
- {sessions && sessions?.length === 0 ? ( + {sessions !== null && sessions?.length === 0 ? (
Welcome @@ -454,7 +577,14 @@ export default function ChatView({ <> {session && (
- + thread.status === "awaiting_input" + )} // Disable input while waiting for user input + />
)} diff --git a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chatinput.tsx b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chatinput.tsx index b170f4bcc4c8..712c6936b585 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chatinput.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/chatinput.tsx @@ -12,17 +12,21 @@ interface ChatInputProps { onSubmit: (text: string) => void; loading: boolean; error: IStatus | null; + disabled?: boolean; } + export default function ChatInput({ onSubmit, loading, error, + disabled = false, }: ChatInputProps) { const textAreaRef = React.useRef(null); const [previousLoading, setPreviousLoading] = React.useState(loading); const [text, setText] = React.useState(""); const textAreaDefaultHeight = "64px"; + const isInputDisabled = disabled || loading; // Handle textarea auto-resize React.useEffect(() => { @@ -54,10 +58,9 @@ export default function ChatInput({ }; const handleSubmit = () => { - if (textAreaRef.current?.value && !loading) { + if (textAreaRef.current?.value && !isInputDisabled) { const query = textAreaRef.current.value; onSubmit(query); - // Don't reset immediately - wait for response to complete } }; @@ -69,10 +72,10 @@ export default function ChatInput({ }; return ( -
+
-
- {!loading ? ( -
- -
+ {loading ? ( + ) : ( -
- -
+ )} -
+
diff --git a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/messagelist.tsx b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/messagelist.tsx index 613f96fd2289..d25a508e7bef 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/messagelist.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/messagelist.tsx @@ -14,9 +14,12 @@ import { Loader2, CheckCircle, AlertTriangle, + TriangleAlertIcon, + GroupIcon, } from "lucide-react"; import AgentFlow from "./agentflow/agentflow"; import ThreadView from "./threadview"; +import LoadingDots from "../../shared/atoms"; interface MessageListProps { messages: Message[]; @@ -26,6 +29,7 @@ interface MessageListProps { >; onRetry: (content: string) => void; onCancel: (runId: string) => void; + onInputResponse: (runId: string, response: string) => void; loading?: boolean; teamConfig?: TeamConfig; } @@ -41,6 +45,7 @@ export const MessageList: React.FC = ({ setThreadMessages, onRetry, onCancel, + onInputResponse, // New prop loading = false, teamConfig, }) => { @@ -75,7 +80,7 @@ export const MessageList: React.FC = ({ } } }); - }, [threadMessages]); // This will trigger when any thread messages update + }, [threadMessages]); const toggleThread = (runId: string) => { setThreadMessages((prev) => ({ @@ -107,21 +112,43 @@ export const MessageList: React.FC = ({ size={20} className="inline-block mr-1 text-accent animate-spin" />{" "} - Processing ... + Processing{" "} + +
+ ); + case "awaiting_input": // New status + return ( +
+ {" "} + Waiting for your input
); - case "complete": return ( - +
+ {" "} + Task completed +
); case "error": return ( - +
+ {" "} + An error occurred. +
); case "cancelled": return ( - +
+ {" "} + Task was cancelled. +
); default: return null; @@ -135,26 +162,51 @@ export const MessageList: React.FC = ({ const thread = threadMessages[botMessage.run_id]; const hasThread = thread && thread.messages.length > 0; const isStreaming = thread?.status === "streaming"; + const isAwaitingInput = thread?.status === "awaiting_input"; // New check + + const isFirstMessage = pairIndex === 0; return (
- {/* User message - first */} + {/* User message */} + { +
+ {/*
Task Run 1.
*/} +
+ Run {pairIndex + 1} + {!isFirstMessage && ( + <> + {" "} + |{" "} + {" "} + Note: Each run does not share data with previous runs in + the same session yet.{" "} + + )} +
+
+ }
- User + You
-
+
- {/* Team response - second */} + + {/* Team response */}
- +
Agent Team @@ -162,18 +214,18 @@ export const MessageList: React.FC = ({
{/* Main response container */} -
-
+
+
{getStatusIcon(thread?.status)}{" "} - {thread?.finalResult?.content} + {!isAwaitingInput && thread?.finalResult?.content}
- {/* Thread section with left border for hierarchy */} + {/* Thread section */} {hasThread && (
-
+
+
+
+ ); +}; + const ThreadView: React.FC = ({ thread, isStreaming, runId, onCancel, + onInputResponse, threadContainerRef, }) => { + const isAwaitingInput = thread.status === "awaiting_input"; + const isTimedOut = thread.status === "timeout"; + + const getStatusIcon = () => { + switch (thread.status) { + case "streaming": + return ; + case "awaiting_input": + return ; + case "complete": + return ; + case "error": + return ; + case "timeout": + return ; + default: + return null; + } + }; + + const getStatusText = () => { + if (isStreaming) { + return ( + <> + Agents working + + + ); + } + if (isAwaitingInput) return "Waiting for your input"; + if (isTimedOut) return TIMEOUT_CONFIG.DEFAULT_MESSAGE; + if (thread.reason) + return ( + <> + Stop Reason: + {thread.reason} + + ); + return null; + }; + + const handleTimeout = () => { + if (thread.inputRequest) { + onInputResponse(runId, "TIMEOUT"); + } + }; + return (
- {/* Status bar - fixed at top */}
-
- {isStreaming ? ( - "Agents discussing..." - ) : ( - <> - Stop Reason - {thread.reason} - - )} +
+ {getStatusIcon()} + {getStatusText()}
- {isStreaming && ( + {(isStreaming || isAwaitingInput) && (
- {/* Thread messages and flow visualization in tabs */}
= ({ />
))} + + {thread.inputRequest && ( + onInputResponse(runId, response)} + disabled={!isAwaitingInput || isTimedOut} + onTimeout={handleTimeout} + /> + )}
diff --git a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/types.ts b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/types.ts index 565b1fc822d3..fa169da734d1 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/playground/chat/types.ts +++ b/python/packages/autogen-studio/frontend/src/components/views/playground/chat/types.ts @@ -1,22 +1,21 @@ -import { Message, TaskResult } from "../../../types/datamodel"; - -export type ThreadStatus = "streaming" | "complete" | "error" | "cancelled"; +import { + AgentMessageConfig, + Message, + TaskResult, + ThreadStatus, +} from "../../../types/datamodel"; export interface ThreadState { - messages: any[]; + messages: AgentMessageConfig[]; finalResult?: any; status: ThreadStatus; isExpanded: boolean; -} - -export interface ThreadState { - messages: any[]; - finalResult?: any; - status: "streaming" | "complete" | "error" | "cancelled"; - isExpanded: boolean; reason?: string; + inputRequest?: { + prompt: string; + isPending: boolean; + }; } - export interface MessageListProps { messages: Message[]; threadMessages: Record; @@ -33,15 +32,15 @@ export interface ModelUsage { completion_tokens: number; } -export interface SocketMessage { - type: "message" | "result" | "completion"; - data?: { - source?: string; - models_usage?: ModelUsage | null; - content?: string; - task_result?: TaskResult; - }; - status?: ThreadStatus; - timestamp?: string; - error?: string; +export const TIMEOUT_CONFIG = { + DURATION_MS: 3 * 60 * 1000, // 3 minutes in milliseconds + DURATION_SEC: 3 * 60, // 3 minutes in seconds + WEBSOCKET_CODE: 4000, // WebSocket close code for timeout + DEFAULT_MESSAGE: "Input timeout after 3 minutes", + WARNING_THRESHOLD_SEC: 30, // Show warning when 30 seconds remaining +} as const; + +export interface TimeoutError { + code: typeof TIMEOUT_CONFIG.WEBSOCKET_CODE; + message: string; } diff --git a/python/packages/autogen-studio/frontend/src/components/views/shared/atoms.tsx b/python/packages/autogen-studio/frontend/src/components/views/shared/atoms.tsx index c03099173e38..15dbc8c6f5a7 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/shared/atoms.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/shared/atoms.tsx @@ -6,3 +6,38 @@ export const LoadingIndicator = ({ size = 16 }: { size: number }) => (
); + +export const LoadingDots = ({ size = 8 }) => { + return ( + + + + + + ); +}; + +export default LoadingDots; diff --git a/python/packages/autogen-studio/frontend/src/components/views/shared/session/api.ts b/python/packages/autogen-studio/frontend/src/components/views/shared/session/api.ts index 65ce38c2e0b5..cccd6ad61a76 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/shared/session/api.ts +++ b/python/packages/autogen-studio/frontend/src/components/views/shared/session/api.ts @@ -14,7 +14,7 @@ export class SessionAPI { async listSessions(userId: string): Promise { const response = await fetch( - `${this.getBaseUrl()}/sessions?user_id=${userId}`, + `${this.getBaseUrl()}/sessions/?user_id=${userId}`, { headers: this.getHeaders(), } @@ -47,7 +47,7 @@ export class SessionAPI { user_id: userId, // Ensure user_id is included }; - const response = await fetch(`${this.getBaseUrl()}/sessions`, { + const response = await fetch(`${this.getBaseUrl()}/sessions/`, { method: "POST", headers: this.getHeaders(), body: JSON.stringify(session), diff --git a/python/packages/autogen-studio/frontend/src/components/views/shared/session/manager.tsx b/python/packages/autogen-studio/frontend/src/components/views/shared/session/manager.tsx index 4f36680c4a3f..e2f0a9ce7d0e 100644 --- a/python/packages/autogen-studio/frontend/src/components/views/shared/session/manager.tsx +++ b/python/packages/autogen-studio/frontend/src/components/views/shared/session/manager.tsx @@ -149,12 +149,12 @@ export const SessionManager: React.FC = () => { icon={} > New Session{" "} - {sessions.length === 0 && ( - + {/* {sessions.length === 0 && ( + - )} + )} */}
diff --git a/python/packages/autogen-studio/frontend/src/components/views/shared/team/api.ts b/python/packages/autogen-studio/frontend/src/components/views/shared/team/api.ts
index f2a4a6b0f262..8d13aa6a488f 100644
--- a/python/packages/autogen-studio/frontend/src/components/views/shared/team/api.ts
+++ b/python/packages/autogen-studio/frontend/src/components/views/shared/team/api.ts
@@ -14,7 +14,7 @@ export class TeamAPI {
   async listTeams(userId: string): Promise<Team[]> {
     const response = await fetch(
-      `${this.getBaseUrl()}/teams?user_id=${userId}`,
+      `${this.getBaseUrl()}/teams/?user_id=${userId}`,
       {
         headers: this.getHeaders(),
       }
@@ -42,7 +42,7 @@ export class TeamAPI {
       user_id: userId,
     };
 
-    const response = await fetch(`${this.getBaseUrl()}/teams`, {
+    const response = await fetch(`${this.getBaseUrl()}/teams/`, {
      method: "POST",
      headers: this.getHeaders(),
      body: JSON.stringify(team),
diff --git a/python/packages/autogen-studio/frontend/src/components/views/shared/team/editor.tsx b/python/packages/autogen-studio/frontend/src/components/views/shared/team/editor.tsx
index 36c1e41f5d1b..a1fc87836eab 100644
--- a/python/packages/autogen-studio/frontend/src/components/views/shared/team/editor.tsx
+++ b/python/packages/autogen-studio/frontend/src/components/views/shared/team/editor.tsx
@@ -7,9 +7,39 @@ import type { Team, TeamConfig } from "../../../types/datamodel";
 import { MonacoEditor } from "../monaco";
 
 const defaultTeamConfig: TeamConfig = {
-  name: "",
-  participants: [],
+  version: "1.0.0",
+  component_type: "team",
+  name: "weather_team",
+  participants: [
+    {
+      component_type: "agent",
+      name: "writing_agent",
+      agent_type: "AssistantAgent",
+      system_message:
+        "You are a helpful assistant. Solve tasks carefully. When done respond with TERMINATE",
+      model_client: {
+        component_type: "model",
+        model: "gpt-4o-2024-08-06",
+        model_type: "OpenAIChatCompletionClient",
+      },
+      tools: [
+        {
+          component_type: "tool",
+          name: "get_weather",
+          description: "Get the weather for a city",
+          content:
+            'async def get_weather(city: str) -> str:\n return f"The weather in {city} is 73 degrees and Sunny."',
+          tool_type: "PythonFunction",
+        },
+      ],
+    },
+  ],
   team_type: "RoundRobinGroupChat",
+  termination_condition: {
+    component_type: "termination",
+    termination_type: "MaxMessageTermination",
+    max_messages: 10,
+  },
 };
 
 type FieldType = {
@@ -122,48 +152,57 @@ export const TeamEditor: React.FC = ({
   width={800}
   forceRender
 >
-
-  [JSX lost in extraction: the old modal body rendered the hint below, the JSON editor, a {jsonError && ...} alert showing {jsonError}, and the action buttons]
-    Required fields: name (string), team_type ("RoundRobinGroupChat" |
-    "SelectorGroupChat"), participants (array)
+  [JSX lost in extraction: the new modal body adds the notice below, then the same hint, editor, {jsonError && ...} alert, and action buttons]
+    Work is still being done to create an improved team editor.
+    Required fields: name (string), team_type ("RoundRobinGroupChat" |
+    "SelectorGroupChat"), participants (array)
   );
 };
diff --git a/python/packages/autogen-studio/frontend/src/hooks/store.tsx b/python/packages/autogen-studio/frontend/src/hooks/store.tsx
index e730295b39fd..1272a3fcc7c1 100644
--- a/python/packages/autogen-studio/frontend/src/hooks/store.tsx
+++ b/python/packages/autogen-studio/frontend/src/hooks/store.tsx
@@ -1,7 +1,29 @@
 import { create } from "zustand";
+import { persist, createJSONStorage } from "zustand/middleware";
 import { v4 as uuidv4 } from "uuid";
 import { Message, Session } from "../components/types/datamodel";
 
+interface IBreadcrumb {
+  name: string;
+  href: string;
+  current?: boolean;
+}
+
+// New interfaces
+export interface IAgentFlowSettings {
+  direction: "TB" | "LR";
+  showLabels: boolean;
+  showGrid: boolean;
+  showTokens: boolean;
+  showMessages: boolean;
+  // Add any other settings we want to persist
+}
+
+interface IHeaderState {
+  title: string;
+  breadcrumbs?: IBreadcrumb[];
+}
+
 interface ISidebarState {
   isExpanded: boolean;
   isPinned: boolean;
@@ -17,49 +39,95 @@ export interface IConfigState {
   version: string | null;
   setVersion: (version: string | null) => void;
 
+  // Header state
+  header: IHeaderState;
+  setHeader: (header: Partial<IHeaderState>) => void;
+  setBreadcrumbs: (breadcrumbs: IBreadcrumb[]) => void;
+
   // Sidebar state
   sidebar: ISidebarState;
   setSidebarState: (state: Partial<ISidebarState>) => void;
   collapseSidebar: () => void;
   expandSidebar: () => void;
   toggleSidebar: () => void;
-}
-export const useConfigStore = create<IConfigState>((set) => ({
-  // Existing state
-  messages: [],
-  setMessages: (messages) => set({ messages }),
-  session: null,
-  setSession: (session) => set({ session }),
-  sessions: [],
-  setSessions: (sessions) => set({ sessions }),
-  version: null,
-  setVersion: (version) => set({ version }),
-  connectionId: uuidv4(),
-
-  // Sidebar state and actions
-  sidebar: {
-    isExpanded: true,
-    isPinned: false,
-  },
+  // Agent flow settings
+  agentFlow: IAgentFlowSettings;
+  setAgentFlowSettings: (settings: Partial<IAgentFlowSettings>) => void;
+}
-  setSidebarState: (newState) =>
-    set((state) => ({
-      sidebar: { ...state.sidebar, ...newState },
-    })),
+// Default settings
+const DEFAULT_AGENT_FLOW_SETTINGS: IAgentFlowSettings = {
+  direction: "TB",
+  showLabels: true,
+  showGrid: true,
+  showTokens: true,
+  showMessages: true,
+};
-  collapseSidebar: () =>
-    set((state) => ({
-      sidebar: { ...state.sidebar, isExpanded: false },
-    })),
+export const useConfigStore = create<IConfigState>()(
+  persist(
+    (set) => ({
+      // Existing state
+      messages: [],
+      setMessages: (messages) => set({ messages }),
+      session: null,
+      setSession: (session) => set({ session }),
+      sessions: [],
+      setSessions: (sessions) => set({ sessions }),
+      version: null,
+      setVersion: (version) => set({ version }),
+      connectionId: uuidv4(),
-  expandSidebar: () =>
-    set((state) => ({
-      sidebar: { ...state.sidebar, isExpanded: true },
-    })),
+      // Header state
+      header: {
+        title: "",
+        breadcrumbs: [],
+      },
+      setHeader: (newHeader) =>
+        set((state) => ({
+          header: { ...state.header, ...newHeader },
+        })),
+      setBreadcrumbs: (breadcrumbs) =>
+        set((state) => ({
+          header: { ...state.header, breadcrumbs },
+        })),
+      // Add AgentFlow settings
+      agentFlow: DEFAULT_AGENT_FLOW_SETTINGS,
+      setAgentFlowSettings: (newSettings) =>
+        set((state) => ({
+          agentFlow: { ...state.agentFlow, ...newSettings },
+        })),
-  toggleSidebar: () =>
-    set((state) => ({
-      sidebar: { ...state.sidebar, isExpanded: !state.sidebar.isExpanded },
-    })),
-}));
+      // Sidebar state and actions
+      sidebar: {
+        isExpanded: true,
+        isPinned: false,
+      },
+      setSidebarState: (newState) =>
+        set((state) => ({
+          sidebar: { ...state.sidebar, ...newState },
+        })),
+      collapseSidebar: () =>
+        set((state) => ({
+          sidebar: { ...state.sidebar, isExpanded: false },
+        })),
+      expandSidebar: () =>
+        set((state) => ({
+          sidebar: { ...state.sidebar, isExpanded: true },
+        })),
+      toggleSidebar: () =>
+        set((state) => ({
+          sidebar: { ...state.sidebar, isExpanded: !state.sidebar.isExpanded },
+        })),
+    }),
+    {
+      name: "app-sidebar-state",
+      storage: createJSONStorage(() => localStorage),
+      partialize: (state) => ({
+        sidebar: state.sidebar,
+        agentFlow: state.agentFlow,
+      }),
+    }
+  )
+);
diff --git a/python/packages/autogen-studio/frontend/tailwind.config.js b/python/packages/autogen-studio/frontend/tailwind.config.js
index 4bc76de888a9..6faec9a83ef4 100644
--- a/python/packages/autogen-studio/frontend/tailwind.config.js
+++ b/python/packages/autogen-studio/frontend/tailwind.config.js
@@ -9,7 +9,7 @@ module.exports = {
     typography: {
       DEFAULT: {
         css: {
-          maxWidth: "100ch", // add required value here
+          maxWidth: "100ch",
         },
       },
     },
@@ -17,7 +17,7 @@ module.exports = {
       height: "height",
       spacing: "margin, padding",
     },
-    backgroundColor: {
+    colors: {
       primary: "var(--color-bg-primary)",
       secondary: "var(--color-bg-secondary)",
       accent: "var(--color-bg-accent)",
@@ -36,5 +36,16 @@ module.exports = {
       },
     },
   },
-  plugins: [require("@tailwindcss/typography")],
+  plugins: [
+    require("@tailwindcss/typography"),
+    function ({ addBase, theme }) {
+      addBase({
+        ":root": {
+          "--tw-bg-opacity": "1",
+          "--tw-text-opacity": "1",
+          "--tw-border-opacity": "1",
+        },
+      });
+    },
+  ],
 };
diff --git a/python/packages/autogen-studio/frontend/yarn.lock b/python/packages/autogen-studio/frontend/yarn.lock
index 8aaa3126b75a..03ca9a83ff87 100644
--- a/python/packages/autogen-studio/frontend/yarn.lock
+++ b/python/packages/autogen-studio/frontend/yarn.lock
@@ -20,11 +20,19 @@
   resolved "https://registry.yarnpkg.com/@ant-design/charts-util/-/charts-util-0.0.1-alpha.6.tgz#09903d28a15d86cc73fafef5383a5a3f932811bb"
   integrity sha512-roZobGkUJ0WqULPiQkX/2j01r6Cn0W6WTVpszq9u8dZKwyrSDr+UgfA/hDmrwOm9TWD9HAxe7aRHnvC06dux8w==
 
-"@ant-design/charts@^2.2.1":
-  version "2.2.1"
-  resolved "https://registry.yarnpkg.com/@ant-design/charts/-/charts-2.2.1.tgz#5ee5d6f8c477c50308d89e41f57db5b61a628c15"
-  integrity sha512-OjAZeJo2xWlatj1/dw8eB9CDMsGCZ/BA6Z6k2sLPkSqDV9GUIjGNgXQ9igYrIqfpB+xVq5lLPvHndyXCHcOzMQ==
+"@ant-design/charts-util@0.0.1-alpha.7":
+  version "0.0.1-alpha.7"
+  resolved "https://registry.yarnpkg.com/@ant-design/charts-util/-/charts-util-0.0.1-alpha.7.tgz#39152b7106970faa226ba857fae64a0eb32f30b9"
+  integrity sha512-Yh0o6EdO6SvdSnStFZMbnUzjyymkVzV+TQ9ymVW9hlVgO/fUkUII3JYSdV+UVcFnYwUF0YiDKuSTLCZNAzg2bQ==
+  dependencies:
+    lodash "^4.17.21"
+
+"@ant-design/charts@^2.2.3":
+  version "2.2.3"
+  resolved "https://registry.yarnpkg.com/@ant-design/charts/-/charts-2.2.3.tgz#ca484a70877ee9c7013951f477fcba661e842525"
+  integrity sha512-gjyOJwAvRH3NztbR4R7bQ+wVfA/jRT+EgtSEQxQtjka4w0srMIftXgaoxoycgCEHgO1eUg20RWXx0mMFKyWIfg==
   dependencies:
+    "@ant-design/graphs" "^2.0.0"
     "@ant-design/plots" "^2.1.3"
     lodash "^4.17.21"
@@ -64,6 +72,18 @@
   dependencies:
     "@babel/runtime" "^7.24.7"
 
+"@ant-design/graphs@^2.0.0":
+  version "2.0.0"
+  resolved "https://registry.yarnpkg.com/@ant-design/graphs/-/graphs-2.0.0.tgz#dd0b39c05ffcb42c9368df0515fdc0a1917f26d8"
+  integrity sha512-giwe60AHwcQp5mXKQrsDU2/34cKOJQoc4rYPB9N1CqaGFcEWqOi6Kiz7O9s0QRwPBpzyP/boyP01a9qb03ycKw==
+  dependencies:
+    "@ant-design/charts-util" "0.0.1-alpha.7"
+    "@antv/g6"
"^5.0.24" + "@antv/g6-extension-react" "^0.1.7" + "@antv/graphin" "^3.0.2" + lodash "^4.17.21" + styled-components "^6.1.13" + "@ant-design/icons-svg@^4.4.0": version "4.4.2" resolved "https://registry.yarnpkg.com/@ant-design/icons-svg/-/icons-svg-4.4.2.tgz#ed2be7fb4d82ac7e1d45a54a5b06d6cecf8be6f6" @@ -103,7 +123,15 @@ resize-observer-polyfill "^1.5.1" throttle-debounce "^5.0.0" -"@antv/component@^2.0.0": +"@antv/algorithm@^0.1.26": + version "0.1.26" + resolved "https://registry.yarnpkg.com/@antv/algorithm/-/algorithm-0.1.26.tgz#e3f5e7f1d8db5b415c3f31e32b119cbcafc8f5de" + integrity sha512-DVhcFSQ8YQnMNW34Mk8BSsfc61iC1sAnmcfYoXTAshYHuU50p/6b7x3QYaGctDNKWGvi1ub7mPcSY0bK+aN0qg== + dependencies: + "@antv/util" "^2.0.13" + tslib "^2.0.0" + +"@antv/component@^2.0.0", "@antv/component@^2.1.1": version "2.1.1" resolved "https://registry.yarnpkg.com/@antv/component/-/component-2.1.1.tgz#c91daad673913b6efb3c6503dca7ba123346197b" integrity sha512-V0UCq3Bekqtjw5WedexT1tHM/9x5BY0UAaU7G/5A2NhRfp9GuaQ8xGWLMSWlCQiJSRZWhPIA7RoOSw4Y/W+7UA== @@ -138,6 +166,17 @@ gl-matrix "^3.4.3" tslib "^2.5.3" +"@antv/g-camera-api@2.0.21": + version "2.0.21" + resolved "https://registry.yarnpkg.com/@antv/g-camera-api/-/g-camera-api-2.0.21.tgz#fb312a5db79addb978dee32fc2bb91c59ba384b5" + integrity sha512-cU903cmIBEyVX6hk7bmoltnnORnRd+KnRQsFzWv+Gg8l99bVOEqVa6/YE+Geh9Gt0JVUr+k06KVs8V40IVkz5Q== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + "@antv/g-canvas@^2.0.0": version "2.0.20" resolved "https://registry.yarnpkg.com/@antv/g-canvas/-/g-canvas-2.0.20.tgz#a79a6960f408a8e3b1982468b6659303b79f5f2d" @@ -154,6 +193,22 @@ "@babel/runtime" "^7.25.6" tslib "^2.5.3" +"@antv/g-canvas@^2.0.24": + version "2.0.25" + resolved "https://registry.yarnpkg.com/@antv/g-canvas/-/g-canvas-2.0.25.tgz#4558fe57b5b6ca823ab6461eb3ce302927443670" + integrity sha512-dHpEghB4f+Ts9b3vNzRf43xZ/V6xhLNYOT/0ZbK30fV495bo3Jn+sL1SSjeMnaTJkU6heBso/9yBn1ns2U7/Vw== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-plugin-canvas-path-generator" "2.1.2" + "@antv/g-plugin-canvas-picker" "2.1.4" + "@antv/g-plugin-canvas-renderer" "2.2.4" + "@antv/g-plugin-dom-interaction" "2.1.7" + "@antv/g-plugin-html-renderer" "2.1.7" + "@antv/g-plugin-image-loader" "2.1.4" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g-dom-mutation-observer-api@2.0.15": version "2.0.15" resolved "https://registry.yarnpkg.com/@antv/g-dom-mutation-observer-api/-/g-dom-mutation-observer-api-2.0.15.tgz#035199a7dca34514308edb723bfc59f0560b10ac" @@ -162,6 +217,14 @@ "@antv/g-lite" "2.1.4" "@babel/runtime" "^7.25.6" +"@antv/g-dom-mutation-observer-api@2.0.18": + version "2.0.18" + resolved "https://registry.yarnpkg.com/@antv/g-dom-mutation-observer-api/-/g-dom-mutation-observer-api-2.0.18.tgz#4ace5f4932dc3ce594912800487d6581ab193c18" + integrity sha512-LOriTfw9iSJVQv24VpBKnwWTy2Axv1JxnOpfC6siV8M1D+5cfv+fBmWs6cIQKxM9p7RZSkpaYFQWM27+sH0AvA== + dependencies: + "@antv/g-lite" "2.2.2" + "@babel/runtime" "^7.25.6" + "@antv/g-lite@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@antv/g-lite/-/g-lite-2.1.4.tgz#28ec3e971ff86c031b51750bb2765525dce79cda" @@ -176,6 +239,20 @@ rbush "^3.0.1" tslib "^2.5.3" +"@antv/g-lite@2.2.2": + version "2.2.2" + resolved "https://registry.yarnpkg.com/@antv/g-lite/-/g-lite-2.2.2.tgz#154d60f68f842c1f3e45f91d8741ae2edd152c5a" + integrity sha512-Ffk7Jar+n6lUA+TEvoEaN30rAWe5l6Ybic7lucA90PKiyCsN0w+qVGJzbvskamuwp3RmcSZDNwQGP8vg374dCA== + dependencies: + 
"@antv/g-math" "3.0.0" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + d3-color "^3.1.0" + eventemitter3 "^5.0.1" + gl-matrix "^3.4.3" + rbush "^3.0.1" + tslib "^2.5.3" + "@antv/g-math@3.0.0": version "3.0.0" resolved "https://registry.yarnpkg.com/@antv/g-math/-/g-math-3.0.0.tgz#834d993391546e39ae5a30452572fdc49a7c57ec" @@ -196,6 +273,17 @@ "@babel/runtime" "^7.25.6" tslib "^2.5.3" +"@antv/g-plugin-canvas-path-generator@2.1.2": + version "2.1.2" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-canvas-path-generator/-/g-plugin-canvas-path-generator-2.1.2.tgz#b96f5a572eebb66f2607414fe180f84c9fb67667" + integrity sha512-ILaKEQvbAZNkRhbE3kWxd0EszDAb3TE0HQKNfur7YuVkQMgACDj6jbVUULFPcGpcw/pQbWM8nD1RRf24hfFAlw== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-math" "3.0.0" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g-plugin-canvas-picker@2.0.17": version "2.0.17" resolved "https://registry.yarnpkg.com/@antv/g-plugin-canvas-picker/-/g-plugin-canvas-picker-2.0.17.tgz#a213d9d7a0a6613c7456f65514fa1d7efb3fc312" @@ -210,6 +298,20 @@ gl-matrix "^3.4.3" tslib "^2.5.3" +"@antv/g-plugin-canvas-picker@2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-canvas-picker/-/g-plugin-canvas-picker-2.1.4.tgz#963dfe2a8a6771ab969910e52a7d1e1a6ef7f0ed" + integrity sha512-L/L7wiZVCnLumIsE1DU14t4kwnwOLyQlsB3ZE/Q6ZVEf6UiZp1ARrGR8zzz+yUPxeWiJqduVnBRLOkyZkdjOoQ== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-math" "3.0.0" + "@antv/g-plugin-canvas-path-generator" "2.1.2" + "@antv/g-plugin-canvas-renderer" "2.2.4" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + "@antv/g-plugin-canvas-renderer@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@antv/g-plugin-canvas-renderer/-/g-plugin-canvas-renderer-2.1.4.tgz#677449301512e30ae4ec6975689b19f3e8241f41" @@ -224,6 +326,20 @@ gl-matrix "^3.4.3" tslib "^2.5.3" +"@antv/g-plugin-canvas-renderer@2.2.4": + version "2.2.4" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-canvas-renderer/-/g-plugin-canvas-renderer-2.2.4.tgz#3ac4227c16dfc52f074bc8343a9eea4afc99d403" + integrity sha512-cmqVUB+aAglj/gYvc8Zcqjwe9PXOapyTE6Q9ChYVnlStP8rMdyXgdfACNa7pi7T1TWoq2etuEp29t4k+oGKLqQ== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-math" "3.0.0" + "@antv/g-plugin-canvas-path-generator" "2.1.2" + "@antv/g-plugin-image-loader" "2.1.4" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + "@antv/g-plugin-dom-interaction@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@antv/g-plugin-dom-interaction/-/g-plugin-dom-interaction-2.1.4.tgz#56c4657ffddfa0a3ab86a4c7cb91df2b87c85b2f" @@ -233,6 +349,15 @@ "@babel/runtime" "^7.25.6" tslib "^2.5.3" +"@antv/g-plugin-dom-interaction@2.1.7": + version "2.1.7" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-dom-interaction/-/g-plugin-dom-interaction-2.1.7.tgz#93ffb590701999b742628936390a5890b81fab80" + integrity sha512-wrlcFlWsXq9Pa6ju5de8V0TzIR2GivBOkRyHQuNba6kpUC1RAT0EHpTpPCJML+Jys8nt8A0ppssSF4E0jZ1MpQ== + dependencies: + "@antv/g-lite" "2.2.2" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g-plugin-dragndrop@^2.0.0": version "2.0.15" resolved "https://registry.yarnpkg.com/@antv/g-plugin-dragndrop/-/g-plugin-dragndrop-2.0.15.tgz#8e710a8f66ed971c8d500cae82664de913405e05" @@ -243,6 +368,16 @@ "@babel/runtime" "^7.25.6" tslib "^2.5.3" +"@antv/g-plugin-dragndrop@^2.0.18": + version "2.0.18" + resolved 
"https://registry.yarnpkg.com/@antv/g-plugin-dragndrop/-/g-plugin-dragndrop-2.0.18.tgz#1e7df5d6e40a29befc4e563ceb19f2f6d6b972e0" + integrity sha512-kjRj/yoWXh5J0Db/gtB3TFNlR4x3dMWvlRz6+7M3ka/mlrdMS0SGFLZfbWq6xvN+TzyiDXZdzSwA0A0OkA5Jww== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g-plugin-html-renderer@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@antv/g-plugin-html-renderer/-/g-plugin-html-renderer-2.1.4.tgz#e5604a9890495fa33e9f02f7648ec0d83cd8a5f5" @@ -254,6 +389,17 @@ gl-matrix "^3.4.3" tslib "^2.5.3" +"@antv/g-plugin-html-renderer@2.1.7": + version "2.1.7" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-html-renderer/-/g-plugin-html-renderer-2.1.7.tgz#ec4874d86a887809e9ef2b096132d6a50c1b53d4" + integrity sha512-MPquXo9MT9QHUdbvgwcBr9msNv9Qh2slOKpc1fzwWG7/aLpmOAxTAhd3+Sjj8JuVXHYw25Q3seI8lMJSGwTWvw== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + "@antv/g-plugin-image-loader@2.0.15": version "2.0.15" resolved "https://registry.yarnpkg.com/@antv/g-plugin-image-loader/-/g-plugin-image-loader-2.0.15.tgz#7928c4abf7ff283deddde78bc35a34bc46a53932" @@ -265,6 +411,51 @@ gl-matrix "^3.4.3" tslib "^2.5.3" +"@antv/g-plugin-image-loader@2.1.4": + version "2.1.4" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-image-loader/-/g-plugin-image-loader-2.1.4.tgz#0b4f5e00a0941178db4efd54afc182b47c6ce543" + integrity sha512-dRikFBY/GNvMW7ecbDerChgnKnjZQoSNONR8uL/bZ1vurTZTd9HK1FYz2QDjLIRePWNTlfcWmux1RooSKfo1OA== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + +"@antv/g-plugin-svg-picker@2.0.20": + version "2.0.20" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-svg-picker/-/g-plugin-svg-picker-2.0.20.tgz#5401d37816a1bb6c8457fabfe38bc0eddeb2b383" + integrity sha512-gML0upmK24Bqr8REsuW8ZZqlKNHSEDtYlycFof36HDcKVNDWIf22ff5N3op1rCHkbny6XXjkIXoKulfQM6GHOA== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-plugin-svg-renderer" "2.2.2" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + +"@antv/g-plugin-svg-renderer@2.2.2": + version "2.2.2" + resolved "https://registry.yarnpkg.com/@antv/g-plugin-svg-renderer/-/g-plugin-svg-renderer-2.2.2.tgz#c48bd91df47e73ebce22a9e8eab63cedecc221ff" + integrity sha512-fWQ5gVSxcZr+Ip95wVeHY2WtoWnsyGDqj+lkxK1aAq0uzqI2qIVyawUc86R87Bv4UVY9b0B5KCtig5F5SGGwAg== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + tslib "^2.5.3" + +"@antv/g-svg@^2.0.11": + version "2.0.20" + resolved "https://registry.yarnpkg.com/@antv/g-svg/-/g-svg-2.0.20.tgz#6f8377bd310381102b80930756a99e97f565b5aa" + integrity sha512-KMb5VzQ3nZpBBwH+sRIJGh/umfEpv4SmDWwV/Yy4nAn0X91yq++qEiIDCnq9QSXHOESV6Si7XJbo4WtX5G7TPw== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/g-plugin-dom-interaction" "2.1.7" + "@antv/g-plugin-svg-picker" "2.0.20" + "@antv/g-plugin-svg-renderer" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g-web-animations-api@2.1.4": version "2.1.4" resolved "https://registry.yarnpkg.com/@antv/g-web-animations-api/-/g-web-animations-api-2.1.4.tgz#aaa2be6014fc58b9d8f6724b053b7893eae4cc18" @@ -275,6 +466,16 @@ "@babel/runtime" "^7.25.6" tslib "^2.5.3" +"@antv/g-web-animations-api@2.1.7": + version "2.1.7" + resolved 
"https://registry.yarnpkg.com/@antv/g-web-animations-api/-/g-web-animations-api-2.1.7.tgz#c56868ebaa8ddc7aca79c9314f6a211f91847039" + integrity sha512-yx7ZwLUgiglCe7sSisloWesO8gwgNwTGeDE0fTHhk1kZmO0BxZ8q6/VxlmLgXTyqdRso+6wfsTo2HukMURJSNw== + dependencies: + "@antv/g-lite" "2.2.2" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + tslib "^2.5.3" + "@antv/g2-extension-plot@^0.2.0": version "0.2.1" resolved "https://registry.yarnpkg.com/@antv/g2-extension-plot/-/g2-extension-plot-0.2.1.tgz#664a3cdf2d4d708ed1231ee90d984812b6226d29" @@ -311,6 +512,44 @@ fmin "^0.0.2" pdfast "^0.2.0" +"@antv/g6-extension-react@^0.1.7": + version "0.1.7" + resolved "https://registry.yarnpkg.com/@antv/g6-extension-react/-/g6-extension-react-0.1.7.tgz#d0cf5cb2584383c47bd4d6fd3a5caeeca3943e11" + integrity sha512-fKk1weq2odHSTi5i8iSg9/keDPbufryA2TZ2X2j+qkSAwxJ7WtURagV/7/CUN9r1tMMk1eoiuzQZXdvc72a1GA== + dependencies: + "@antv/g" "^6.0.13" + "@antv/g-svg" "^2.0.11" + "@antv/react-g" "^2.0.14" + +"@antv/g6@^5.0.24", "@antv/g6@^5.0.28": + version "5.0.30" + resolved "https://registry.yarnpkg.com/@antv/g6/-/g6-5.0.30.tgz#0e241100bc0519ba0bf2c0ab11cb80a576f1796d" + integrity sha512-QEpNNAz/DcSnyHMJJ1UNSVjKgbfJ0zhwHcq8I/+f/mZl87oK1GlcRi8FCVcPGBb+W3OH8xfH5GIjPxEPma1kxg== + dependencies: + "@antv/algorithm" "^0.1.26" + "@antv/component" "^2.1.1" + "@antv/event-emitter" "^0.1.3" + "@antv/g" "^6.1.7" + "@antv/g-canvas" "^2.0.24" + "@antv/g-plugin-dragndrop" "^2.0.18" + "@antv/graphlib" "^2.0.3" + "@antv/hierarchy" "^0.6.14" + "@antv/layout" "1.2.14-beta.9" + "@antv/util" "^3.3.10" + bubblesets-js "^2.3.4" + hull.js "^1.0.6" + +"@antv/g@6.1.7", "@antv/g@^6.0.13", "@antv/g@^6.1.7": + version "6.1.7" + resolved "https://registry.yarnpkg.com/@antv/g/-/g-6.1.7.tgz#3003b2532c4287f2208537bcca87bffd0e44c30a" + integrity sha512-qv8YnBKqX3Yjs85U9OnBa6E92tNAI3cKrBhDrI5EikzjVPqfcVQLx0P5Zo8uzCYt7m9jFpJCi/iaGvWX/fA14Q== + dependencies: + "@antv/g-camera-api" "2.0.21" + "@antv/g-dom-mutation-observer-api" "2.0.18" + "@antv/g-lite" "2.2.2" + "@antv/g-web-animations-api" "2.1.7" + "@babel/runtime" "^7.25.6" + "@antv/g@^6.0.0", "@antv/g@^6.1.2": version "6.1.4" resolved "https://registry.yarnpkg.com/@antv/g/-/g-6.1.4.tgz#4279fc065ab2fe4581874fddb06c46ce5c58c841" @@ -322,6 +561,56 @@ "@antv/g-web-animations-api" "2.1.4" "@babel/runtime" "^7.25.6" +"@antv/graphin@^3.0.2": + version "3.0.4" + resolved "https://registry.yarnpkg.com/@antv/graphin/-/graphin-3.0.4.tgz#33f4ead798d8f1fa1bf885247c68ca94d18e17c4" + integrity sha512-7ce6RDI5Z6ud93yiyS7b+mmFrHJhlkwwNo53kb7P7KoCsnV7ioMONDE6Gw0ROeMSR6TwHtxGZUhHw9wxnPp82Q== + dependencies: + "@antv/g6" "^5.0.28" + +"@antv/graphlib@^2.0.0", "@antv/graphlib@^2.0.3": + version "2.0.3" + resolved "https://registry.yarnpkg.com/@antv/graphlib/-/graphlib-2.0.3.tgz#493e05872851c897e2133b0968cf5c6f4f6c022e" + integrity sha512-EtQR+DIfsYy28tumTnH560v7yIzXZq0nSgFBZh76mMiV1oHEN1L4p6JKu7IMtILH14mDqzmYYYFetYoAODoQUw== + dependencies: + "@antv/event-emitter" "^0.1.3" + +"@antv/hierarchy@^0.6.14": + version "0.6.14" + resolved "https://registry.yarnpkg.com/@antv/hierarchy/-/hierarchy-0.6.14.tgz#4e8b4966c9c2a44aaa6f9da7008c4bd44d490385" + integrity sha512-V3uknf7bhynOqQDw2sg+9r9DwZ9pc6k/EcqyTFdfXB1+ydr7urisP0MipIuimucvQKN+Qkd+d6w601r1UIroqQ== + +"@antv/layout@1.2.14-beta.9": + version "1.2.14-beta.9" + resolved "https://registry.yarnpkg.com/@antv/layout/-/layout-1.2.14-beta.9.tgz#5c66a0f22158c545aabd1654a50bfc8c3bf93f98" + integrity 
sha512-wPlwBFMtq2lWZFc89/7Lzb8fjHnyKVZZ9zBb2h+zZIP0YWmVmHRE8+dqCiPKOyOGUXEdDtn813f1g107dCHZlg== + dependencies: + "@antv/event-emitter" "^0.1.3" + "@antv/graphlib" "^2.0.0" + "@antv/util" "^3.3.2" + "@naoak/workerize-transferable" "^0.1.0" + comlink "^4.4.1" + d3-force "^3.0.0" + d3-force-3d "^3.0.5" + d3-octree "^1.0.2" + d3-quadtree "^3.0.1" + dagre "^0.8.5" + ml-matrix "^6.10.4" + tslib "^2.5.0" + +"@antv/react-g@^2.0.14": + version "2.0.23" + resolved "https://registry.yarnpkg.com/@antv/react-g/-/react-g-2.0.23.tgz#74df2fb27eba3144fd914baf51b9607233b4b670" + integrity sha512-Cur5/B6kRRK7kxj5USsEobKcGAGoWG9fDltVJ/3m95kMN95Ayx2rbVFxc5NavTrnmhxO4OUkRIyb0PDBFApfQA== + dependencies: + "@antv/g" "6.1.7" + "@antv/util" "^3.3.5" + "@babel/runtime" "^7.25.6" + gl-matrix "^3.4.3" + react-reconciler "^0.26.2" + scheduler "^0.20.2" + tslib "^2.5.3" + "@antv/scale@^0.4.12", "@antv/scale@^0.4.3": version "0.4.16" resolved "https://registry.yarnpkg.com/@antv/scale/-/scale-0.4.16.tgz#60557470668ccfe5217e482a01f05c0cbb706b62" @@ -339,7 +628,7 @@ csstype "^3.0.8" tslib "^2.0.3" -"@antv/util@^3.3.5", "@antv/util@^3.3.7": +"@antv/util@^3.3.10", "@antv/util@^3.3.2", "@antv/util@^3.3.5", "@antv/util@^3.3.7": version "3.3.10" resolved "https://registry.yarnpkg.com/@antv/util/-/util-3.3.10.tgz#6fb2560c0f42df61f824e1f995a1ed1bdb00eb9a" integrity sha512-basGML3DFA3O87INnzvDStjzS+n0JLEhRnRsDzP9keiXz8gT1z/fTdmJAZFOzMMWxy+HKbi7NbSt0+8vz/OsBQ== @@ -1345,7 +1634,7 @@ "@babel/plugin-transform-modules-commonjs" "^7.25.9" "@babel/plugin-transform-typescript" "^7.25.9" -"@babel/runtime@^7.0.0", "@babel/runtime@^7.10.1", "@babel/runtime@^7.10.4", "@babel/runtime@^7.11.1", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.16.7", "@babel/runtime@^7.18.0", "@babel/runtime@^7.18.3", "@babel/runtime@^7.20.0", "@babel/runtime@^7.20.13", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.23.2", "@babel/runtime@^7.23.6", "@babel/runtime@^7.23.9", "@babel/runtime@^7.24.4", "@babel/runtime@^7.24.7", "@babel/runtime@^7.24.8", "@babel/runtime@^7.25.6", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": +"@babel/runtime@^7.0.0", "@babel/runtime@^7.10.1", "@babel/runtime@^7.10.4", "@babel/runtime@^7.11.1", "@babel/runtime@^7.11.2", "@babel/runtime@^7.12.5", "@babel/runtime@^7.16.7", "@babel/runtime@^7.18.0", "@babel/runtime@^7.18.3", "@babel/runtime@^7.20.0", "@babel/runtime@^7.20.13", "@babel/runtime@^7.20.7", "@babel/runtime@^7.21.0", "@babel/runtime@^7.22.5", "@babel/runtime@^7.23.2", "@babel/runtime@^7.23.6", "@babel/runtime@^7.23.9", "@babel/runtime@^7.24.4", "@babel/runtime@^7.24.7", "@babel/runtime@^7.24.8", "@babel/runtime@^7.25.6", "@babel/runtime@^7.25.7", "@babel/runtime@^7.8.4", "@babel/runtime@^7.9.2": version "7.26.0" resolved "https://registry.yarnpkg.com/@babel/runtime/-/runtime-7.26.0.tgz#8600c2f595f277c60815256418b85356a65173c1" integrity sha512-FDSOghenHTiToteC/QRlv2q3DhPZ/oOXTBoirfWNx1Cx3TMVcGWQtMMmQcSvb/JjpNeGzx8Pq/b4fKEJuWm1sw== @@ -1409,6 +1698,23 @@ resolved "https://registry.yarnpkg.com/@emotion/hash/-/hash-0.8.0.tgz#bbbff68978fefdbe68ccb533bc8cbe1d1afb5413" integrity sha512-kBJtf7PH6aWwZ6fka3zQ0p6SBYzx4fl1LoZXE2RrnYST9Xljm7WfKJrU4g/Xr3Beg72MLrp1AWNUmuYJTL7Cow== +"@emotion/is-prop-valid@1.2.2": + version "1.2.2" + resolved "https://registry.yarnpkg.com/@emotion/is-prop-valid/-/is-prop-valid-1.2.2.tgz#d4175076679c6a26faa92b03bb786f9e52612337" + integrity 
sha512-uNsoYd37AFmaCdXlg6EYD1KaPOaRWRByMCYzbKUX4+hhMfrxdVSelShywL4JVaAeM/eHUOSprYBQls+/neX3pw== + dependencies: + "@emotion/memoize" "^0.8.1" + +"@emotion/memoize@^0.8.1": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@emotion/memoize/-/memoize-0.8.1.tgz#c1ddb040429c6d21d38cc945fe75c818cfb68e17" + integrity sha512-W2P2c/VRW1/1tLox0mVUalvnWXxavmv/Oum2aPsRcoDJuob75FC3Y8FbpfLwUegRcxINtGUMPq0tFCvYNTBXNA== + +"@emotion/unitless@0.8.1": + version "0.8.1" + resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.8.1.tgz#182b5a4704ef8ad91bde93f7a860a88fd92c79a3" + integrity sha512-KOEGMu6dmJZtpadb476IsZBclKvILjopjUii3V+7MnXIQCYh8W3NgNcgwo21n9LXZX6EDIKvqfjYxXebDwxKmQ== + "@emotion/unitless@^0.7.5": version "0.7.5" resolved "https://registry.yarnpkg.com/@emotion/unitless/-/unitless-0.7.5.tgz#77211291c1900a700b8a78cfafda3160d76949ed" @@ -1441,15 +1747,51 @@ minimatch "^3.0.4" strip-json-comments "^3.1.1" -"@gatsbyjs/parcel-namer-relative-to-cwd@^2.13.1": - version "2.13.1" - resolved "https://registry.yarnpkg.com/@gatsbyjs/parcel-namer-relative-to-cwd/-/parcel-namer-relative-to-cwd-2.13.1.tgz#79d2692330a942394200c9bf3e820a4e55075ffb" - integrity sha512-ze0u/CAt6fKV2yQlExkBARi8oqA559lX6/GFWwdtD9S1J4h8Bje70Odl/bcIECvT/w9mWCCQEVtKLvqkraDopw== +"@floating-ui/core@^1.6.0": + version "1.6.8" + resolved "https://registry.yarnpkg.com/@floating-ui/core/-/core-1.6.8.tgz#aa43561be075815879305965020f492cdb43da12" + integrity sha512-7XJ9cPU+yI2QeLS+FCSlqNFZJq8arvswefkZrYI1yQBbftw6FyrZOxYSh+9S7z7TpeWlRt9zJ5IhM1WIL334jA== + dependencies: + "@floating-ui/utils" "^0.2.8" + +"@floating-ui/dom@^1.0.0": + version "1.6.12" + resolved "https://registry.yarnpkg.com/@floating-ui/dom/-/dom-1.6.12.tgz#6333dcb5a8ead3b2bf82f33d6bc410e95f54e556" + integrity sha512-NP83c0HjokcGVEMeoStg317VD9W7eDlGK7457dMBANbKA6GJZdc7rjujdgqzTaz93jkGgc5P/jeWbaCHnMNc+w== + dependencies: + "@floating-ui/core" "^1.6.0" + "@floating-ui/utils" "^0.2.8" + +"@floating-ui/react-dom@^2.1.2": + version "2.1.2" + resolved "https://registry.yarnpkg.com/@floating-ui/react-dom/-/react-dom-2.1.2.tgz#a1349bbf6a0e5cb5ded55d023766f20a4d439a31" + integrity sha512-06okr5cgPzMNBy+Ycse2A6udMi4bqwW/zgBF/rwjcNqWkyr82Mcg8b0vjX8OJpZFy/FKjJmw6wV7t44kK6kW7A== + dependencies: + "@floating-ui/dom" "^1.0.0" + +"@floating-ui/react@^0.26.16": + version "0.26.27" + resolved "https://registry.yarnpkg.com/@floating-ui/react/-/react-0.26.27.tgz#402f7b4b2702650662705fe9cbe0f1d5607846a1" + integrity sha512-jLP72x0Kr2CgY6eTYi/ra3VA9LOkTo4C+DUTrbFgFOExKy3omYVmwMjNKqxAHdsnyLS96BIDLcO2SlnsNf8KUQ== + dependencies: + "@floating-ui/react-dom" "^2.1.2" + "@floating-ui/utils" "^0.2.8" + tabbable "^6.0.0" + +"@floating-ui/utils@^0.2.8": + version "0.2.8" + resolved "https://registry.yarnpkg.com/@floating-ui/utils/-/utils-0.2.8.tgz#21a907684723bbbaa5f0974cf7730bd797eb8e62" + integrity sha512-kym7SodPp8/wloecOpcmSnWJsK7M0E5Wg8UcFA+uO4B9s5d0ywXOEro/8HM9x0rW+TljRzul/14UYz3TleT3ig== + +"@gatsbyjs/parcel-namer-relative-to-cwd@2.14.0": + version "2.14.0" + resolved "https://registry.yarnpkg.com/@gatsbyjs/parcel-namer-relative-to-cwd/-/parcel-namer-relative-to-cwd-2.14.0.tgz#46556c54c7c958528c1fdf9bda2a3d96886bdd2c" + integrity sha512-BWIIsz4oPLIUVAWr4Lgw4BGtJjjhb39+oTrXUa6rN0A+vS5qXrpkx1nGzVb5PJ2RJY9Paf4hNLZVW9LaLnGLBA== dependencies: "@babel/runtime" "^7.20.13" "@parcel/namer-default" "2.8.3" "@parcel/plugin" "2.8.3" - gatsby-core-utils "^4.13.1" + gatsby-core-utils "^4.14.0" "@gatsbyjs/reach-router@^2.0.1": version "2.0.1" @@ -1656,13 +1998,15 @@ dependencies: "@hapi/hoek" 
"^9.0.0" -"@headlessui/react@^1.7.16": - version "1.7.19" - resolved "https://registry.yarnpkg.com/@headlessui/react/-/react-1.7.19.tgz#91c78cf5fcb254f4a0ebe96936d48421caf75f40" - integrity sha512-Ll+8q3OlMJfJbAKM/+/Y2q6PPYbryqNTXDbryx7SXLIDamkF6iQFbriYHga0dY44PvDhvvBWCx1Xj4U5+G4hOw== +"@headlessui/react@^2.2.0": + version "2.2.0" + resolved "https://registry.yarnpkg.com/@headlessui/react/-/react-2.2.0.tgz#a8e32f0899862849a1ce1615fa280e7891431ab7" + integrity sha512-RzCEg+LXsuI7mHiSomsu/gBJSjpupm6A1qIZ5sWjd7JhARNlMiSA4kKfJpCKwU9tE+zMRterhhrP74PvfJrpXQ== dependencies: - "@tanstack/react-virtual" "^3.0.0-beta.60" - client-only "^0.0.1" + "@floating-ui/react" "^0.26.16" + "@react-aria/focus" "^3.17.1" + "@react-aria/interactions" "^3.21.3" + "@tanstack/react-virtual" "^3.8.1" "@heroicons/react@^2.0.18": version "2.1.5" @@ -1904,6 +2248,11 @@ resolved "https://registry.yarnpkg.com/@msgpackr-extract/msgpackr-extract-win32-x64/-/msgpackr-extract-win32-x64-3.0.3.tgz#0aa5502d547b57abfc4ac492de68e2006e417242" integrity sha512-x0fWaQtYp4E6sktbsdAqnehxDgEc/VwM7uLsRCYWaiGu0ykYdZPiS8zCWdnjHwyiumousxfBm4SO31eXqwEZhQ== +"@naoak/workerize-transferable@^0.1.0": + version "0.1.0" + resolved "https://registry.yarnpkg.com/@naoak/workerize-transferable/-/workerize-transferable-0.1.0.tgz#864cc8241b977bffd8661c0be1441da9b4bfb633" + integrity sha512-fDLfuP71IPNP5+zSfxFb52OHgtjZvauRJWbVnpzQ7G7BjcbLjTny0OW1d3ZO806XKpLWNKmeeW3MhE0sy8iwYQ== + "@nicolo-ribaudo/eslint-scope-5-internals@5.1.1-v1": version "5.1.1-v1" resolved "https://registry.yarnpkg.com/@nicolo-ribaudo/eslint-scope-5-internals/-/eslint-scope-5-internals-5.1.1-v1.tgz#dbf733a965ca47b1973177dc0bb6c889edcfb129" @@ -2417,7 +2766,7 @@ classnames "^2.3.2" rc-util "^5.24.4" -"@rc-component/trigger@^2.0.0", "@rc-component/trigger@^2.1.1", "@rc-component/trigger@^2.2.3": +"@rc-component/trigger@^2.0.0", "@rc-component/trigger@^2.1.1": version "2.2.3" resolved "https://registry.yarnpkg.com/@rc-component/trigger/-/trigger-2.2.3.tgz#b47e945115e2d0a7f7e067dbb9ed76c91c1b4385" integrity sha512-X1oFIpKoXAMXNDYCviOmTfuNuYxE4h5laBsyCqVAVMjNHxoF3/uiyA7XdegK1XbCvBbCZ6P6byWrEoDRpKL8+A== @@ -2429,6 +2778,69 @@ rc-resize-observer "^1.3.1" rc-util "^5.38.0" +"@rc-component/trigger@^2.2.5": + version "2.2.5" + resolved "https://registry.yarnpkg.com/@rc-component/trigger/-/trigger-2.2.5.tgz#5ebe383e563e667b3fa24b6b32afedbab378a92e" + integrity sha512-F1EJ4KjFpGAHAjuKvOyZB/6IZDkVx0bHl0M4fQM5wXcmm7lgTgVSSnR3bXwdmS6jOJGHOqfDxIJW3WUvwMIXhQ== + dependencies: + "@babel/runtime" "^7.23.2" + "@rc-component/portal" "^1.1.0" + classnames "^2.3.2" + rc-motion "^2.0.0" + rc-resize-observer "^1.3.1" + rc-util "^5.38.0" + +"@react-aria/focus@^3.17.1": + version "3.18.4" + resolved "https://registry.yarnpkg.com/@react-aria/focus/-/focus-3.18.4.tgz#a6e95896bc8680d1b5bcd855e983fc2c195a1a55" + integrity sha512-91J35077w9UNaMK1cpMUEFRkNNz0uZjnSwiyBCFuRdaVuivO53wNC9XtWSDNDdcO5cGy87vfJRVAiyoCn/mjqA== + dependencies: + "@react-aria/interactions" "^3.22.4" + "@react-aria/utils" "^3.25.3" + "@react-types/shared" "^3.25.0" + "@swc/helpers" "^0.5.0" + clsx "^2.0.0" + +"@react-aria/interactions@^3.21.3", "@react-aria/interactions@^3.22.4": + version "3.22.4" + resolved "https://registry.yarnpkg.com/@react-aria/interactions/-/interactions-3.22.4.tgz#88ed61ab6a485f869bc1f65ae6688d48ca96064b" + integrity sha512-E0vsgtpItmknq/MJELqYJwib+YN18Qag8nroqwjk1qOnBa9ROIkUhWJerLi1qs5diXq9LHKehZDXRlwPvdEFww== + dependencies: + "@react-aria/ssr" "^3.9.6" + "@react-aria/utils" "^3.25.3" + "@react-types/shared" "^3.25.0" 
+ "@swc/helpers" "^0.5.0" + +"@react-aria/ssr@^3.9.6": + version "3.9.6" + resolved "https://registry.yarnpkg.com/@react-aria/ssr/-/ssr-3.9.6.tgz#a9e8b351acdc8238f2b5215b0ce904636c6ea690" + integrity sha512-iLo82l82ilMiVGy342SELjshuWottlb5+VefO3jOQqQRNYnJBFpUSadswDPbRimSgJUZuFwIEYs6AabkP038fA== + dependencies: + "@swc/helpers" "^0.5.0" + +"@react-aria/utils@^3.25.3": + version "3.25.3" + resolved "https://registry.yarnpkg.com/@react-aria/utils/-/utils-3.25.3.tgz#cad9bffc07b045cdc283df2cb65c18747acbf76d" + integrity sha512-PR5H/2vaD8fSq0H/UB9inNbc8KDcVmW6fYAfSWkkn+OAdhTTMVKqXXrZuZBWyFfSD5Ze7VN6acr4hrOQm2bmrA== + dependencies: + "@react-aria/ssr" "^3.9.6" + "@react-stately/utils" "^3.10.4" + "@react-types/shared" "^3.25.0" + "@swc/helpers" "^0.5.0" + clsx "^2.0.0" + +"@react-stately/utils@^3.10.4": + version "3.10.4" + resolved "https://registry.yarnpkg.com/@react-stately/utils/-/utils-3.10.4.tgz#310663a834b67048d305e1680ed258130092fe51" + integrity sha512-gBEQEIMRh5f60KCm7QKQ2WfvhB2gLUr9b72sqUdIZ2EG+xuPgaIlCBeSicvjmjBvYZwOjoOEnmIkcx2GHp/HWw== + dependencies: + "@swc/helpers" "^0.5.0" + +"@react-types/shared@^3.25.0": + version "3.25.0" + resolved "https://registry.yarnpkg.com/@react-types/shared/-/shared-3.25.0.tgz#7223baf72256e918a3c29081bb1ecc6fad4fbf58" + integrity sha512-OZSyhzU6vTdW3eV/mz5i6hQwQUhkRs7xwY2d1aqPvTdMe0+2cY7Fwp45PAiwYLEj73i9ro2FxF9qC4DvHGSCgQ== + "@rtsao/scc@^1.1.0": version "1.1.0" resolved "https://registry.yarnpkg.com/@rtsao/scc/-/scc-1.1.0.tgz#927dd2fae9bc3361403ac2c7a00c32ddce9ad7e8" @@ -2501,6 +2913,13 @@ "@swc/legacy-helpers" "npm:@swc/helpers@=0.4.14" tslib "^2.4.0" +"@swc/helpers@^0.5.0": + version "0.5.15" + resolved "https://registry.yarnpkg.com/@swc/helpers/-/helpers-0.5.15.tgz#79efab344c5819ecf83a43f3f9f811fc84b516d7" + integrity sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g== + dependencies: + tslib "^2.8.0" + "@swc/legacy-helpers@npm:@swc/helpers@=0.4.14": version "0.4.14" resolved "https://registry.yarnpkg.com/@swc/helpers/-/helpers-0.4.14.tgz#1352ac6d95e3617ccb7c1498ff019654f1e12a74" @@ -2532,17 +2951,17 @@ lodash.merge "^4.6.2" postcss-selector-parser "6.0.10" -"@tanstack/react-virtual@^3.0.0-beta.60": - version "3.10.8" - resolved "https://registry.yarnpkg.com/@tanstack/react-virtual/-/react-virtual-3.10.8.tgz#bf4b06f157ed298644a96ab7efc1a2b01ab36e3c" - integrity sha512-VbzbVGSsZlQktyLrP5nxE+vE1ZR+U0NFAWPbJLoG2+DKPwd2D7dVICTVIIaYlJqX1ZCEnYDbaOpmMwbsyhBoIA== +"@tanstack/react-virtual@^3.8.1": + version "3.10.9" + resolved "https://registry.yarnpkg.com/@tanstack/react-virtual/-/react-virtual-3.10.9.tgz#40606b6dd8aba8e977f576d8f7df07f69ca63eea" + integrity sha512-OXO2uBjFqA4Ibr2O3y0YMnkrRWGVNqcvHQXmGvMu6IK8chZl3PrDxFXdGZ2iZkSrKh3/qUYoFqYe+Rx23RoU0g== dependencies: - "@tanstack/virtual-core" "3.10.8" + "@tanstack/virtual-core" "3.10.9" -"@tanstack/virtual-core@3.10.8": - version "3.10.8" - resolved "https://registry.yarnpkg.com/@tanstack/virtual-core/-/virtual-core-3.10.8.tgz#975446a667755222f62884c19e5c3c66d959b8b4" - integrity sha512-PBu00mtt95jbKFi6Llk9aik8bnR3tR/oQP1o3TSi+iG//+Q2RTIzCEgKkHG8BB86kxMNW6O8wku+Lmi+QFR6jA== +"@tanstack/virtual-core@3.10.9": + version "3.10.9" + resolved "https://registry.yarnpkg.com/@tanstack/virtual-core/-/virtual-core-3.10.9.tgz#55710c92b311fdaa8d8c66682a0dbdd684bc77c4" + integrity sha512-kBknKOKzmeR7lN+vSadaKWXaLS0SZZG+oqpQ/k80Q6g9REn6zRHS/ZYdrIzHnpHgy/eWs00SujveUN/GJT2qTw== "@tokenizer/token@^0.3.0": version "0.3.0" @@ -2554,18 +2973,6 @@ resolved 
"https://registry.yarnpkg.com/@trysound/sax/-/sax-0.2.0.tgz#cccaab758af56761eb7bf37af6f03f326dd798ad" integrity sha512-L7z9BgrNEcYyUYtF+HaEfiS5ebkh9jXqbszz7pC0hRBPaatV0XjSD3+eHrpqFemQfgwiFF0QPIarnIihIDn7OA== -"@turist/fetch@^7.2.0": - version "7.2.0" - resolved "https://registry.yarnpkg.com/@turist/fetch/-/fetch-7.2.0.tgz#57df869df1cd9b299588554eec4b8543effcc714" - integrity sha512-2x7EGw+6OJ29phunsbGvtxlNmSfcuPcyYudkMbi8gARCP9eJ1CtuMvnVUHL//O9Ixi9SJiug8wNt6lj86pN8XQ== - dependencies: - "@types/node-fetch" "2" - -"@turist/time@^0.0.2": - version "0.0.2" - resolved "https://registry.yarnpkg.com/@turist/time/-/time-0.0.2.tgz#32fe0ce708ea0f4512776bd313409f1459976dda" - integrity sha512-qLOvfmlG2vCVw5fo/oz8WAZYlpe5a5OurgTj3diIxJCdjRHpapC+vQCz3er9LV79Vcat+DifBjeAhOAdmndtDQ== - "@types/acorn@^4.0.0": version "4.0.6" resolved "https://registry.yarnpkg.com/@types/acorn/-/acorn-4.0.6.tgz#d61ca5480300ac41a7d973dd5b84d0a591154a22" @@ -2809,14 +3216,6 @@ resolved "https://registry.yarnpkg.com/@types/ms/-/ms-0.7.34.tgz#10964ba0dee6ac4cd462e2795b6bebd407303433" integrity sha512-nG96G3Wp6acyAgJqGasjODb+acrI7KltPiRxzHPXnP3NgI28bpQDRv53olbqGXbfcgF5aiiHmO3xpwEpS5Ld9g== -"@types/node-fetch@2": - version "2.6.11" - resolved "https://registry.yarnpkg.com/@types/node-fetch/-/node-fetch-2.6.11.tgz#9b39b78665dae0e82a08f02f4967d62c66f95d24" - integrity sha512-24xFj9R5+rfQJLRyM56qh+wnVSYhyXC2tkoBndtY0U+vubqNsYXGjufB2nn8Q6gt0LrARwL6UBtMCSVCwl4B1g== - dependencies: - "@types/node" "*" - form-data "^4.0.0" - "@types/node@*", "@types/node@>=10.0.0": version "22.8.6" resolved "https://registry.yarnpkg.com/@types/node/-/node-22.8.6.tgz#e8a0c0871623283d8b3ef7d7b9b1bfdfd3028e22" @@ -2829,12 +3228,12 @@ resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.45.tgz#2c0fafd78705e7a18b7906b5201a522719dc5190" integrity sha512-w+tIMs3rq2afQdsPJlODhoUEKzFP1ayaoyl1CcnwtIlsVe7K7bA1NGm4s3PraqTLlXnbIN84zuBlxBWo1u9BLw== -"@types/node@^20.11.19": - version "20.17.5" - resolved "https://registry.yarnpkg.com/@types/node/-/node-20.17.5.tgz#b7a1d8619ced7ce1da901b07a47c61107272449a" - integrity sha512-n8FYY/pRxu496441gIcAQFZPKXbhsd6VZygcq+PTSZ75eMh/Ke0hCAROdUa21qiFqKNsPPYic46yXDO1JGiPBQ== +"@types/node@^22.9.0": + version "22.9.0" + resolved "https://registry.yarnpkg.com/@types/node/-/node-22.9.0.tgz#b7f16e5c3384788542c72dc3d561a7ceae2c0365" + integrity sha512-vuyHg81vvWA1Z1ELfvLko2c8f34gyA0zaic0+Rllc5lbCnbSyuvb2Oxpm6TAUAC/2xZN3QGqxBNggD1nNR2AfQ== dependencies: - undici-types "~6.19.2" + undici-types "~6.19.8" "@types/node@^8.5.7": version "8.10.66" @@ -2907,6 +3306,11 @@ resolved "https://registry.yarnpkg.com/@types/semver/-/semver-7.5.8.tgz#8268a8c57a3e4abd25c165ecd36237db7948a55e" integrity sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ== +"@types/stylis@4.2.5": + version "4.2.5" + resolved "https://registry.yarnpkg.com/@types/stylis/-/stylis-4.2.5.tgz#1daa6456f40959d06157698a653a9ab0a70281df" + integrity sha512-1Xve+NMN7FWjY14vLoY5tL3BVEQ/n42YLwaqJIPYhotZ9uBHt87VceMwWQpzmdEt2TNXIorIFG+YeCUUW7RInw== + "@types/tmp@^0.0.33": version "0.0.33" resolved "https://registry.yarnpkg.com/@types/tmp/-/tmp-0.0.33.tgz#1073c4bc824754ae3d10cfab88ab0237ba964e4d" @@ -3364,56 +3768,56 @@ ansi-styles@^6.1.0: resolved "https://registry.yarnpkg.com/ansi-styles/-/ansi-styles-6.2.1.tgz#0e62320cf99c21afff3b3012192546aacbfb05c5" integrity sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug== -antd@^5.1.0: - version "5.21.6" - resolved 
"https://registry.yarnpkg.com/antd/-/antd-5.21.6.tgz#60d8eb442f7f8019bab198e30ea5a8d6d224a6ce" - integrity sha512-EviOde/VEu+OsIKH5t6YXTMmmNeg9R85m0W5zXAo+Np8Latg9q10691JvAqOTMpnrRmbdeKUQL1Krp69Bzbe/g== +antd@^5.22.1: + version "5.22.1" + resolved "https://registry.yarnpkg.com/antd/-/antd-5.22.1.tgz#726c9d4465a1f59079ef60896418fcdbb07a796c" + integrity sha512-itq8AZwe3IfawZH6SMM5XdbTz1xXGTTqA7sNN0qpEdxcoTpD5nRsCBAMIy+PhwcWFobgFc6ZlF8d7f8eicn0SQ== dependencies: "@ant-design/colors" "^7.1.0" "@ant-design/cssinjs" "^1.21.1" "@ant-design/cssinjs-utils" "^1.1.1" "@ant-design/icons" "^5.5.1" "@ant-design/react-slick" "~1.1.2" - "@babel/runtime" "^7.25.6" + "@babel/runtime" "^7.25.7" "@ctrl/tinycolor" "^3.6.1" "@rc-component/color-picker" "~2.0.1" "@rc-component/mutate-observer" "^1.1.0" "@rc-component/qrcode" "~1.0.0" "@rc-component/tour" "~1.15.1" - "@rc-component/trigger" "^2.2.3" + "@rc-component/trigger" "^2.2.5" classnames "^2.5.1" copy-to-clipboard "^3.3.3" dayjs "^1.11.11" - rc-cascader "~3.28.2" + rc-cascader "~3.30.0" rc-checkbox "~3.3.0" - rc-collapse "~3.8.0" + rc-collapse "~3.9.0" rc-dialog "~9.6.0" rc-drawer "~7.2.0" rc-dropdown "~4.2.0" - rc-field-form "~2.4.0" + rc-field-form "~2.5.0" rc-image "~7.11.0" rc-input "~1.6.3" - rc-input-number "~9.2.0" - rc-mentions "~2.16.1" - rc-menu "~9.15.1" + rc-input-number "~9.3.0" + rc-mentions "~2.17.0" + rc-menu "~9.16.0" rc-motion "^2.9.3" rc-notification "~5.6.2" rc-pagination "~4.3.0" - rc-picker "~4.6.15" + rc-picker "~4.8.0" rc-progress "~4.0.0" rc-rate "~2.13.0" rc-resize-observer "^1.4.0" rc-segmented "~2.5.0" - rc-select "~14.15.2" + rc-select "~14.16.3" rc-slider "~11.1.7" rc-steps "~6.0.1" rc-switch "~4.1.0" - rc-table "~7.47.5" - rc-tabs "~15.3.0" + rc-table "~7.48.1" + rc-tabs "~15.4.0" rc-textarea "~1.8.2" rc-tooltip "~6.2.1" - rc-tree "~5.9.0" - rc-tree-select "~5.23.0" + rc-tree "~5.10.1" + rc-tree-select "~5.24.4" rc-upload "~4.8.1" rc-util "^5.43.0" scroll-into-view-if-needed "^3.1.0" @@ -3489,11 +3893,6 @@ array-includes@^3.1.6, array-includes@^3.1.8: get-intrinsic "^1.2.4" is-string "^1.0.7" -array-tree-filter@^2.1.0: - version "2.1.0" - resolved "https://registry.yarnpkg.com/array-tree-filter/-/array-tree-filter-2.1.0.tgz#873ac00fec83749f255ac8dd083814b4f6329190" - integrity sha512-4ROwICNlNw/Hqa9v+rk5h22KjmzB1JGTMVKP2AKJBOCgb0yL0ASf0+YvCcLNNwquOHNX48jkeZIJ3a+oOQqKcw== - array-union@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/array-union/-/array-union-2.1.0.tgz#b798420adbeb1de828d84acd8a2e23d3efe85e8d" @@ -3598,7 +3997,7 @@ async@1.5.2: resolved "https://registry.yarnpkg.com/async/-/async-1.5.2.tgz#ec6a61ae56480c0c3cb241c95618e20892f9672a" integrity sha512-nSVgobk4rv61R9PUSDtYt7mPVB2olxNR5RWJcAsH676/ef11bUZwvu7+RGYrYauVdDPcO519v68wRhXQtxsV9w== -async@^3.2.4: +async@^3.2.5: version "3.2.6" resolved "https://registry.yarnpkg.com/async/-/async-3.2.6.tgz#1b0728e14929d51b85b449b7f06e27c1145e38ce" integrity sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA== @@ -3642,12 +4041,14 @@ axe-core@^4.10.0: resolved "https://registry.yarnpkg.com/axe-core/-/axe-core-4.10.2.tgz#85228e3e1d8b8532a27659b332e39b7fa0e022df" integrity sha512-RE3mdQ7P3FRSe7eqCWoeQ/Z9QXrtniSjp1wUjt5nRC3WIpz5rSCve6o3fsZ2aCpJtrZjSZgjwXAoTO5k4tEI0w== -axios@^0.21.1: - version "0.21.4" - resolved "https://registry.yarnpkg.com/axios/-/axios-0.21.4.tgz#c67b90dc0568e5c1cf2b0b858c43ba28e2eda575" - integrity sha512-ut5vewkiu8jjGBdqpM44XxjuCjq9LAKeHVmoVfHVzy8eHgxxq8SbAVQNovDA8mVi05kP0Ea/n/UzcSHcTJQfNg== 
+axios@^1.6.4: + version "1.7.7" + resolved "https://registry.yarnpkg.com/axios/-/axios-1.7.7.tgz#2f554296f9892a72ac8d8e4c5b79c14a91d0a47f" + integrity sha512-S4kL7XrjgBmvdGut0sN3yJxqYzrDOnivkBiN0OFs6hLiUam3UPvswUo0kqGyhqUZGEOytHyumEdXsAkgCOUf3Q== dependencies: - follow-redirects "^1.14.0" + follow-redirects "^1.15.6" + form-data "^4.0.0" + proxy-from-env "^1.1.0" axobject-query@^4.1.0: version "4.1.0" @@ -3719,14 +4120,14 @@ babel-plugin-polyfill-regenerator@^0.6.1: dependencies: "@babel/helper-define-polyfill-provider" "^0.6.2" -babel-plugin-remove-graphql-queries@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/babel-plugin-remove-graphql-queries/-/babel-plugin-remove-graphql-queries-5.13.1.tgz#bf1feb392d8a1f74046f9584c72c952663a10695" - integrity sha512-yncJ/W6Un48aBRpK/rmdpQOMcr4+EmJ3oi2Wq1zXKu8WLlw+j93KTbejf7fg2msm8GUskb/+9Nnpz7oMCqO9aA== +babel-plugin-remove-graphql-queries@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/babel-plugin-remove-graphql-queries/-/babel-plugin-remove-graphql-queries-5.14.0.tgz#922ac7c23591434b800fedf163fed042105f3aed" + integrity sha512-k5NlcywGAiMklF7LfBSIuPlNYDdHQYTSuDdNDB1shEzxxmvd9akk+bzPE4qf2w/yvMirCkGSJOQud7w33HH2HQ== dependencies: "@babel/runtime" "^7.20.13" "@babel/types" "^7.20.7" - gatsby-core-utils "^4.13.1" + gatsby-core-utils "^4.14.0" babel-plugin-syntax-trailing-function-commas@^7.0.0-beta.0: version "7.0.0-beta.0" @@ -3771,10 +4172,10 @@ babel-preset-fbjs@^3.4.0: "@babel/plugin-transform-template-literals" "^7.0.0" babel-plugin-syntax-trailing-function-commas "^7.0.0-beta.0" -babel-preset-gatsby@^3.13.2: - version "3.13.2" - resolved "https://registry.yarnpkg.com/babel-preset-gatsby/-/babel-preset-gatsby-3.13.2.tgz#61f9f3a11f96577ca889c6061096c67ef287ec47" - integrity sha512-1zZ3Fpt9jD63inJXWUF2hA6U2cBAMYFDSC5hKqnSSVbNUzKlHUcY0Vbx8azBSaHg27TVp9BitR10zvq5AHP/OQ== +babel-preset-gatsby@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/babel-preset-gatsby/-/babel-preset-gatsby-3.14.0.tgz#17bb148e91c93735c056c8030c34f1477dfd2926" + integrity sha512-yE/1gmohqy+Y/v8RYWMyJ1WLrACSkblS8LRcByeNqqBPrDAaG5T/bU1Lhc75BW8j9iLDOkLilun3fGZRu5nacA== dependencies: "@babel/plugin-proposal-class-properties" "^7.18.6" "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.6" @@ -3789,8 +4190,8 @@ babel-preset-gatsby@^3.13.2: babel-plugin-dynamic-import-node "^2.3.3" babel-plugin-macros "^3.1.0" babel-plugin-transform-react-remove-prop-types "^0.4.24" - gatsby-core-utils "^4.13.1" - gatsby-legacy-polyfills "^3.13.1" + gatsby-core-utils "^4.14.0" + gatsby-legacy-polyfills "^3.14.0" bail@^2.0.0: version "2.0.2" @@ -3888,24 +4289,6 @@ bluebird@^3.7.2: resolved "https://registry.yarnpkg.com/bluebird/-/bluebird-3.7.2.tgz#9f229c15be272454ffa973ace0dbee79a1b0c36f" integrity sha512-XpNj6GDQzdfW+r2Wnn7xiSAd7TM3jzkxGXBGTtWKuSXv1xUV+azxAm8jdWZN06QTQk+2N2XB9jRDkvbmQmcRtg== -body-parser@1.20.1: - version "1.20.1" - resolved "https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.1.tgz#b1812a8912c195cd371a3ee5e66faa2338a5c668" - integrity sha512-jWi7abTbYwajOytWCQc37VulmWiRae5RyTpaCyDcS5/lMdtwSz5lOpDE67srw/HYe35f1z3fDQw+3txg7gNtWw== - dependencies: - bytes "3.1.2" - content-type "~1.0.4" - debug "2.6.9" - depd "2.0.0" - destroy "1.2.0" - http-errors "2.0.0" - iconv-lite "0.4.24" - on-finished "2.4.1" - qs "6.11.0" - raw-body "2.5.1" - type-is "~1.6.18" - unpipe "1.0.0" - body-parser@1.20.3: version "1.20.3" resolved 
"https://registry.yarnpkg.com/body-parser/-/body-parser-1.20.3.tgz#1953431221c6fb5cd63c4b36d53fab0928e548c6" @@ -3982,6 +4365,11 @@ bser@2.1.1: dependencies: node-int64 "^0.4.0" +bubblesets-js@^2.3.4: + version "2.3.4" + resolved "https://registry.yarnpkg.com/bubblesets-js/-/bubblesets-js-2.3.4.tgz#8e1230b29c309e3327a05630fe02df3d96596ab6" + integrity sha512-DyMjHmpkS2+xcFNtyN00apJYL3ESdp9fTrkDr5+9Qg/GPqFmcWgGsK1akZnttE1XFxJ/VMy4DNNGMGYtmFp1Sg== + buffer-from@^1.0.0: version "1.1.2" resolved "https://registry.yarnpkg.com/buffer-from/-/buffer-from-1.1.2.tgz#2b146a6fd72e80b4f55d255f35ed59a3a9a41bd5" @@ -4096,6 +4484,11 @@ camelcase@^6.2.0, camelcase@^6.3.0: resolved "https://registry.yarnpkg.com/camelcase/-/camelcase-6.3.0.tgz#5685b95eb209ac9c0c177467778c9c84df58ba9a" integrity sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA== +camelize@^1.0.0: + version "1.0.1" + resolved "https://registry.yarnpkg.com/camelize/-/camelize-1.0.1.tgz#89b7e16884056331a35d6b5ad064332c91daa6c3" + integrity sha512-dU+Tx2fsypxTgtLoE36npi3UqcjSSMNYfkqgmoEhtZrraP5VWq0K7FkWVTYa8eMPtnU/G2txVsfdCJTn9uzpuQ== + caniuse-api@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/caniuse-api/-/caniuse-api-3.0.0.tgz#5e4d90e2274961d46291997df599e3ed008ee4c0" @@ -4293,11 +4686,6 @@ cli-width@^3.0.0: resolved "https://registry.yarnpkg.com/cli-width/-/cli-width-3.0.0.tgz#a2f48437a2caa9a22436e794bf071ec9e61cedf6" integrity sha512-FxqpkPPwu1HjuN93Omfm4h8uIanXofW0RxVEW3k5RKx+mJJYSthzNhp32Kzxxy3YAEZ/Dc/EWN1vZRY0+kOhbw== -client-only@^0.0.1: - version "0.0.1" - resolved "https://registry.yarnpkg.com/client-only/-/client-only-0.0.1.tgz#38bba5d403c41ab150bff64a95c85013cf73bca1" - integrity sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA== - clipboardy@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/clipboardy/-/clipboardy-4.0.0.tgz#e73ced93a76d19dd379ebf1f297565426dffdca1" @@ -4346,6 +4734,11 @@ clone@^2.1.1: resolved "https://registry.yarnpkg.com/clone/-/clone-2.1.2.tgz#1b7f4b9f591f1e8f83670401600345a02887435f" integrity sha512-3Pe/CF1Nn94hyhIYpjtiLhdCoEoz0DqQ+988E9gmeEdQZlojxnOb74wctFyuwWQHzqyf9X7C7MG8juUpqBJT8w== +clsx@^2.0.0: + version "2.1.1" + resolved "https://registry.yarnpkg.com/clsx/-/clsx-2.1.1.tgz#eed397c9fd8bd882bfb18deab7102049a2f32999" + integrity sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA== + color-convert@^1.9.0: version "1.9.3" resolved "https://registry.yarnpkg.com/color-convert/-/color-convert-1.9.3.tgz#bb71850690e1f136567de629d2d5471deda4c1e8" @@ -4391,10 +4784,10 @@ colord@^2.9.1: resolved "https://registry.yarnpkg.com/colord/-/colord-2.9.3.tgz#4f8ce919de456f1d5c1c368c307fe20f3e59fb43" integrity sha512-jeC1axXpnb0/2nn/Y1LPuLdgXBLH7aDcHu4KEKfqw3CUhX7ZpfBSlPKyqXE6btIgEzfWtrX3/tyBCaCvXvMkOw== -colorette@^1.2.2: - version "1.4.0" - resolved "https://registry.yarnpkg.com/colorette/-/colorette-1.4.0.tgz#5190fbb87276259a86ad700bff2c6d6faa3fca40" - integrity sha512-Y2oEozpomLn7Q3HFP7dpww7AtMJplbM9lGZP6RDfHqmbeRjiwRg4n6VM6j4KLmRke85uWEI7JqF17f3pqdRA0g== +colorette@^2.0.10: + version "2.0.20" + resolved "https://registry.yarnpkg.com/colorette/-/colorette-2.0.20.tgz#9eb793e6833067f7235902fcd3b09917a000a95a" + integrity sha512-IfEDxwoWIjkeXL1eXcDiow4UbKjhLdq6/EuSVR9GMN7KVH3r9gQ83e73hsz1Nd1T3ijd5xv1wcWRYO+D6kCI2w== combined-stream@^1.0.8: version "1.0.8" @@ -4403,6 +4796,11 @@ combined-stream@^1.0.8: dependencies: delayed-stream "~1.0.0" +comlink@^4.4.1: + version 
"4.4.2" + resolved "https://registry.yarnpkg.com/comlink/-/comlink-4.4.2.tgz#cbbcd82742fbebc06489c28a183eedc5c60a2bca" + integrity sha512-OxGdvBmJuNKSCMO4NTl1L47VRp6xn2wG4F/2hYzB6tiCb709otOxtEYCSvK80PtjODfXXZu8ds+Nw5kVCjqd2g== + comma-separated-tokens@^2.0.0: version "2.0.3" resolved "https://registry.yarnpkg.com/comma-separated-tokens/-/comma-separated-tokens-2.0.3.tgz#4e89c9458acb61bc8fef19f4529973b2392839ee" @@ -4635,10 +5033,10 @@ cosmiconfig@^8.3.5: parse-json "^5.2.0" path-type "^4.0.0" -create-gatsby@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/create-gatsby/-/create-gatsby-3.13.1.tgz#394f5ad7647409f5651057f46e93c1cee1efc47d" - integrity sha512-CCg8Vz/iQs1cgMEzyRlVGMvNs8ivE/2w+TL6yS56FVe1JjOou8nKYHzxnWxRmBUtC7rTfjxVaTESIotuYBsltQ== +create-gatsby@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/create-gatsby/-/create-gatsby-3.14.0.tgz#5934709d48b6d3a7ca32466b15349ba5de3359a2" + integrity sha512-HR90NlcTIAeo7OwcFfGubKa35+/Aj7lEfgcV4n4IIPCMy3A+gYwOEQcLIGtmPCVIl1C9AhT3/0XaWbnhAcbyGQ== dependencies: "@babel/runtime" "^7.20.13" @@ -4650,9 +5048,9 @@ cross-fetch@^3.1.5: node-fetch "^2.6.12" cross-spawn@^7.0.0, cross-spawn@^7.0.2, cross-spawn@^7.0.3: - version "7.0.3" - resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.3.tgz#f73a85b9d5d41d045551c177e2882d4ac85728a6" - integrity sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w== + version "7.0.5" + resolved "https://registry.yarnpkg.com/cross-spawn/-/cross-spawn-7.0.5.tgz#910aac880ff5243da96b728bc6521a5f6c2f2f82" + integrity sha512-ZVJrKKYunU38/76t0RMOulHOnUcbU9GbpWKAOZ0mhjr7CX6FVrH+4FrAapSOekrgFQ3f/8gwMEuIft0aKq6Hug== dependencies: path-key "^3.1.0" shebang-command "^2.0.0" @@ -4663,6 +5061,11 @@ crypto-random-string@^2.0.0: resolved "https://registry.yarnpkg.com/crypto-random-string/-/crypto-random-string-2.0.0.tgz#ef2a7a966ec11083388369baa02ebead229b30d5" integrity sha512-v1plID3y9r/lPhviJ1wrXpLeyUIGAZ2SHNYTEapm7/8A9nLPoyvVp3RK/EPFqn5kEznyWgYZNsRtYYIWbuG8KA== +css-color-keywords@^1.0.0: + version "1.0.0" + resolved "https://registry.yarnpkg.com/css-color-keywords/-/css-color-keywords-1.0.0.tgz#fea2616dc676b2962686b3af8dbdbe180b244e05" + integrity sha512-FyyrDHZKEjXDpNJYvVsV960FiqQyXc/LlYmsxl2BcdMb2WPx0OGRVgTg55rPSyLSNMqP52R9r8geSp7apN3Ofg== + css-declaration-sorter@^6.3.1: version "6.4.1" resolved "https://registry.yarnpkg.com/css-declaration-sorter/-/css-declaration-sorter-6.4.1.tgz#28beac7c20bad7f1775be3a7129d7eae409a3a71" @@ -4713,6 +5116,15 @@ css-selector-parser@^1.0.0: resolved "https://registry.yarnpkg.com/css-selector-parser/-/css-selector-parser-1.4.1.tgz#03f9cb8a81c3e5ab2c51684557d5aaf6d2569759" integrity sha512-HYPSb7y/Z7BNDCOrakL4raGO2zltZkbeXyAd6Tg9obzix6QhzxCotdBl6VT0Dv4vZfJGVz3WL/xaEI9Ly3ul0g== +css-to-react-native@3.2.0: + version "3.2.0" + resolved "https://registry.yarnpkg.com/css-to-react-native/-/css-to-react-native-3.2.0.tgz#cdd8099f71024e149e4f6fe17a7d46ecd55f1e32" + integrity sha512-e8RKaLXMOFii+02mOlqwjbD00KSEKqblnpO9e++1aXS1fPQOpS1YoqdVHBqPjHNoxeF2mimzVqawm2KCbEdtHQ== + dependencies: + camelize "^1.0.0" + css-color-keywords "^1.0.0" + postcss-value-parser "^4.0.2" + css-tree@^1.1.2, css-tree@^1.1.3: version "1.1.3" resolved "https://registry.yarnpkg.com/css-tree/-/css-tree-1.1.3.tgz#eb4870fb6fd7707327ec95c2ff2ab09b5e8db91d" @@ -4792,7 +5204,7 @@ csso@^4.2.0: dependencies: css-tree "^1.1.2" -csstype@^3.0.2, csstype@^3.0.8, csstype@^3.1.3: +csstype@3.1.3, csstype@^3.0.2, csstype@^3.0.8, csstype@^3.1.3: 
version "3.1.3" resolved "https://registry.yarnpkg.com/csstype/-/csstype-3.1.3.tgz#d80ff294d114fb0e6ac500fbf85b60137d7eff81" integrity sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw== @@ -4804,6 +5216,11 @@ csstype@^3.0.2, csstype@^3.0.8, csstype@^3.1.3: dependencies: internmap "1 - 2" +d3-binarytree@1: + version "1.0.2" + resolved "https://registry.yarnpkg.com/d3-binarytree/-/d3-binarytree-1.0.2.tgz#ed43ebc13c70fbabfdd62df17480bc5a425753cc" + integrity sha512-cElUNH+sHu95L04m92pG73t2MEJXKu+GeKUN1TJkFsu93E5W8E9Sc3kHEGJKgenGvj19m6upSn2EunvMgMD2Yw== + "d3-color@1 - 3", d3-color@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/d3-color/-/d3-color-3.1.0.tgz#395b2833dfac71507f12ac2f7af23bf819de24e2" @@ -4836,6 +5253,17 @@ d3-dsv@^3.0.1: resolved "https://registry.yarnpkg.com/d3-ease/-/d3-ease-3.0.1.tgz#9658ac38a2140d59d346160f1f6c30fda0bd12f4" integrity sha512-wR/XK3D3XcLIZwpbvQwQ5fK+8Ykds1ip7A2Txe0yxncXSdq1L9skcG7blcedkOX+ZcgxGAmLX1FrRGbADwzi0w== +d3-force-3d@^3.0.5: + version "3.0.5" + resolved "https://registry.yarnpkg.com/d3-force-3d/-/d3-force-3d-3.0.5.tgz#9c8931b49acc3554f9110e128bc580cd3ab830f2" + integrity sha512-tdwhAhoTYZY/a6eo9nR7HP3xSW/C6XvJTbeRpR92nlPzH6OiE+4MliN9feuSFd0tPtEUo+191qOhCTWx3NYifg== + dependencies: + d3-binarytree "1" + d3-dispatch "1 - 3" + d3-octree "1" + d3-quadtree "1 - 3" + d3-timer "1 - 3" + d3-force@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/d3-force/-/d3-force-3.0.0.tgz#3e2ba1a61e70888fe3d9194e30d6d14eece155c4" @@ -4869,12 +5297,17 @@ d3-hierarchy@^3.1.2: dependencies: d3-color "1 - 3" +d3-octree@1, d3-octree@^1.0.2: + version "1.0.2" + resolved "https://registry.yarnpkg.com/d3-octree/-/d3-octree-1.0.2.tgz#b39026b82701e45c7163e34ee056dc492035a017" + integrity sha512-Qxg4oirJrNXauiuC94uKMbgxwnhdda9xRLl9ihq45srlJ4Ga3CSgqGcAL8iW7N5CIv4Oz8x3E734ulxyvHPvwA== + d3-path@^3.1.0: version "3.1.0" resolved "https://registry.yarnpkg.com/d3-path/-/d3-path-3.1.0.tgz#22df939032fb5a71ae8b1800d61ddb7851c42526" integrity sha512-p3KP5HCf/bvjBSSKuXid6Zqijx7wIfNW+J/maPs+iwR35at5JCbLUT0LzF1cnjbCHWhqzQTIN2Jpe8pRebIEFQ== -"d3-quadtree@1 - 3": +"d3-quadtree@1 - 3", d3-quadtree@^3.0.1: version "3.0.1" resolved "https://registry.yarnpkg.com/d3-quadtree/-/d3-quadtree-3.0.1.tgz#6dca3e8be2b393c9a9d514dabbd80a92deef1a4f" integrity sha512-04xDrxQTDTCFwP5H6hRhsRcb9xxv2RzkcsygFzmkSIOJy3PeRJP7sNk3VRIbKXcog561P9oU0/rVH6vDROAgUw== @@ -4934,6 +5367,14 @@ d@1, d@^1.0.1, d@^1.0.2: es5-ext "^0.10.64" type "^2.7.2" +dagre@^0.8.5: + version "0.8.5" + resolved "https://registry.yarnpkg.com/dagre/-/dagre-0.8.5.tgz#ba30b0055dac12b6c1fcc247817442777d06afee" + integrity sha512-/aTqmnRta7x7MCCpExk7HQL2O4owCT2h8NT//9I1OQ9vt29Pa0BzSAkR5lwFUcQ7491yVi/3CXU9jQ5o0Mn2Sw== + dependencies: + graphlib "^2.1.8" + lodash "^4.17.15" + damerau-levenshtein@^1.0.8: version "1.0.8" resolved "https://registry.yarnpkg.com/damerau-levenshtein/-/damerau-levenshtein-1.0.8.tgz#b43d286ccbd36bc5b2f7ed41caf2d0aba1f8a6e7" @@ -5658,7 +6099,7 @@ eslint-plugin-import@^2.27.5: string.prototype.trimend "^1.0.8" tsconfig-paths "^3.15.0" -eslint-plugin-jsx-a11y@^6.7.1: +eslint-plugin-jsx-a11y@^6.8.0: version "6.10.2" resolved "https://registry.yarnpkg.com/eslint-plugin-jsx-a11y/-/eslint-plugin-jsx-a11y-6.10.2.tgz#d2812bb23bf1ab4665f1718ea442e8372e638483" integrity sha512-scB3nz4WmG75pV8+3eRUQOHZlNSUhFNq37xnpgRkCCELU3XMvXAxLk1eqWWyE22Ki4Q01Fnsw9BA3cJHDPgn2Q== @@ -6077,7 +6518,7 @@ fastest-levenshtein@^1.0.16: resolved 
"https://registry.yarnpkg.com/fastest-levenshtein/-/fastest-levenshtein-1.0.16.tgz#210e61b6ff181de91ea9b3d1b84fdedd47e034e5" integrity sha512-eRnCtTTtGZFpQCwhJiUOuxPQWRXVKYDn0b2PeHfXL6/Zi53SLAzAHfVhVWK2AryC/WH05kGfxhFIPvTF0SXQzg== -fastq@^1.15.0, fastq@^1.6.0: +fastq@^1.15.0, fastq@^1.16.0, fastq@^1.6.0: version "1.17.1" resolved "https://registry.yarnpkg.com/fastq/-/fastq-1.17.1.tgz#2a523f07a4e7b1e81a42b91b8bf2254107753b47" integrity sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w== @@ -6256,7 +6697,7 @@ fmin@^0.0.2: tape "^4.5.1" uglify-js "^2.6.2" -follow-redirects@^1.14.0: +follow-redirects@^1.15.6: version "1.15.9" resolved "https://registry.yarnpkg.com/follow-redirects/-/follow-redirects-1.15.9.tgz#a604fa10e443bf98ca94228d9eebcc2e8a2c8ee1" integrity sha512-gew4GsXizNgdoRyqmyfMHyAmXsZDk6mHkSxZFCzW9gwlbtOW44CDtYavM+y+72qD/Vq2l550kMF52DT8fOLJqQ== @@ -6334,7 +6775,7 @@ fs-exists-cached@1.0.0, fs-exists-cached@^1.0.0: resolved "https://registry.yarnpkg.com/fs-exists-cached/-/fs-exists-cached-1.0.0.tgz#cf25554ca050dc49ae6656b41de42258989dcbce" integrity sha512-kSxoARUDn4F2RPXX48UXnaFKwVU7Ivd/6qpzZL29MCDmr9sTvybv4gFCp+qaI4fM9m0z9fgz/yJvi56GAz+BZg== -fs-extra@^11.1.1: +fs-extra@^11.2.0: version "11.2.0" resolved "https://registry.yarnpkg.com/fs-extra/-/fs-extra-11.2.0.tgz#e70e17dfad64232287d01929399e0ea7c86b0e5b" integrity sha512-PmDi3uwK5nFuXh7XDTlVnS17xJS7vW36is2+w3xcv8SVxiB4NyATf4ctkVY5bkSjX0Y4nbvZCq1/EjtEyr9ktw== @@ -6393,10 +6834,10 @@ functions-have-names@^1.2.3: resolved "https://registry.yarnpkg.com/functions-have-names/-/functions-have-names-1.2.3.tgz#0404fe4ee2ba2f607f0e0ec3c80bae994133b834" integrity sha512-xckBUXyTIqT97tq2x2AMb+g163b5JFysYk0x4qxNFwbfQkmNZoiRHb6sPzI9/QV33WeuvVYBUIiD4NzNIyqaRQ== -gatsby-cli@^5.13.3: - version "5.13.3" - resolved "https://registry.yarnpkg.com/gatsby-cli/-/gatsby-cli-5.13.3.tgz#84c38ded7c73a35f0864b05cc27e8560e87a0ef8" - integrity sha512-JQTeivvZWuhxE1HRey2sRhwUmkCrs5478fiLR0XRFGin4X1ggCmrQx/DBkJP1TgktdPfNU8t987+slSnO2XzMA== +gatsby-cli@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-cli/-/gatsby-cli-5.14.0.tgz#dee48b233b1686be7fa27b0f6f9d71c7748a88be" + integrity sha512-NvsWsuZdpNRStmDsNZaYveifFrFqFNy4rnFCLAWWghyxUks8qhGwy6wrz2kcUK+Y2sRJJn1eIjgwzi1AyFn5fQ== dependencies: "@babel/code-frame" "^7.18.6" "@babel/core" "^7.20.12" @@ -6414,13 +6855,12 @@ gatsby-cli@^5.13.3: clipboardy "^4.0.0" common-tags "^1.8.2" convert-hrtime "^3.0.0" - create-gatsby "^3.13.1" + create-gatsby "^3.14.0" envinfo "^7.10.0" execa "^5.1.1" fs-exists-cached "^1.0.0" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-telemetry "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" hosted-git-info "^3.0.8" is-valid-path "^0.1.1" joi "^17.9.2" @@ -6440,19 +6880,19 @@ gatsby-cli@^5.13.3: yoga-layout-prebuilt "^1.10.0" yurnalist "^2.1.0" -gatsby-core-utils@^4.13.1: - version "4.13.1" - resolved "https://registry.yarnpkg.com/gatsby-core-utils/-/gatsby-core-utils-4.13.1.tgz#57955316486cc85ab150922f481484bc9287205e" - integrity sha512-w7G6SsQr8T2q+AJ1MxvRNGocCt+wjc22MiRLj2Zi3Ijpjszbr818JxwI4+aPt8WOSHlKT5SYCHICnEvcYPm9gg== +gatsby-core-utils@^4.14.0: + version "4.14.0" + resolved "https://registry.yarnpkg.com/gatsby-core-utils/-/gatsby-core-utils-4.14.0.tgz#d044327b9399f0449674f8abf1037d1a3ab4e84e" + integrity sha512-h0v20gB213PmhKjioCJ93SrUb7Hihnqxd6X6Iur4u1eiWTUDsGeV9g1bkquiuDl2qovUnjj7mOoHdWiu/Ax/9Q== dependencies: "@babel/runtime" "^7.20.13" ci-info "2.0.0" configstore "^5.0.1" fastq 
"^1.15.0" file-type "^16.5.4" - fs-extra "^11.1.1" + fs-extra "^11.2.0" got "^11.8.6" - hash-wasm "^4.9.0" + hash-wasm "^4.11.0" import-from "^4.0.0" lmdb "2.5.3" lock "^1.1.0" @@ -6462,48 +6902,48 @@ gatsby-core-utils@^4.13.1: tmp "^0.2.1" xdg-basedir "^4.0.0" -gatsby-graphiql-explorer@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/gatsby-graphiql-explorer/-/gatsby-graphiql-explorer-3.13.1.tgz#906f5b75f9c01ca5cd836de6cda60e376126e3e2" - integrity sha512-WR6jv18OhKLwK/hGZ6ZGUd82GFtM6gHz+sOpgsJJx/+uqRjZwqcrH2LSrWRSWUCk7FoZY1rJuBSD1QkOODV01A== +gatsby-graphiql-explorer@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/gatsby-graphiql-explorer/-/gatsby-graphiql-explorer-3.14.0.tgz#9a10f040ede72c104cf5af23561ba4fc01d43161" + integrity sha512-t+PpMu+6GkCdyGDw8S4pd1FBZVwFdpn6Jb2BLZtNJ2z1hOSxHKGoZO1sW2mwZ8/H1VuiSPb2XtXwHYo5CcYgAg== -gatsby-legacy-polyfills@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/gatsby-legacy-polyfills/-/gatsby-legacy-polyfills-3.13.1.tgz#6b18f6979a2dda4e9e0ffd0a453479a90a583385" - integrity sha512-NjR3B/rq6dsJuaMmeHlGExdVXJfDqVWERXi9ROfIBt7O3Fwzy5WYgoPeVikVZE06DmeZWlBzuNcGSfc8lilB5g== +gatsby-legacy-polyfills@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/gatsby-legacy-polyfills/-/gatsby-legacy-polyfills-3.14.0.tgz#45531260454c5e77f38cd1162a746ee1da4edd2d" + integrity sha512-CsNguwA88GgLgENx6WEuT8DDl+dus8FdedU1h43Xh9XtD8g/EjJGZk/N458+aZ32ulhuacJEexe3d9ASuEQxMQ== dependencies: "@babel/runtime" "^7.20.13" core-js-compat "3.31.0" -gatsby-link@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-link/-/gatsby-link-5.13.1.tgz#f37d57f1eb45f8645f0dbc9ffc8c76aca47546ad" - integrity sha512-naQxvgX/rd4Pj5ICL2DcqT30TAENk6wHttcLioxIqW9/UhwAXGkM9QsOJOyUmwbrp37UIKU3K92Ks/cMbRxwXA== +gatsby-link@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-link/-/gatsby-link-5.14.0.tgz#15b731c6e1050c51f1ff443959d82b523cb61b0a" + integrity sha512-BJYTdZ23pObzan1BG/XlqsnlQgiQS54Ztm1CuYj9OPtORT4i9kBrNSaRrQZdsLHM9s2Ox+oBullzZSi6CU/CbQ== dependencies: "@types/reach__router" "^1.3.10" - gatsby-page-utils "^3.13.1" + gatsby-page-utils "^3.14.0" prop-types "^15.8.1" -gatsby-page-utils@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/gatsby-page-utils/-/gatsby-page-utils-3.13.1.tgz#bd159e9f21ce48f170064682b9a8d087ce49abf4" - integrity sha512-+/V+ZKPn1Lv3KfeTBV/XUVljwTFQq5kg3T0esu9ygXEz3EVXjG5VjL/IX57awiDm9sLsEALqRuuYLoHpfNHg0A== +gatsby-page-utils@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/gatsby-page-utils/-/gatsby-page-utils-3.14.0.tgz#a1beaf5cc4805bd95480f5dd95aca7aeb480e8be" + integrity sha512-/Od94fkzkkSzngqglctGMTssQPvHt/Cnjl9hFPM2Clh5UIBSsHjOgOiqM0EPg7VRLXBZypq3DspG2lNaeqCNIA== dependencies: "@babel/runtime" "^7.20.13" bluebird "^3.7.2" chokidar "^3.5.3" fs-exists-cached "^1.0.0" - gatsby-core-utils "^4.13.1" + gatsby-core-utils "^4.14.0" glob "^7.2.3" lodash "^4.17.21" micromatch "^4.0.5" -gatsby-parcel-config@1.13.1: - version "1.13.1" - resolved "https://registry.yarnpkg.com/gatsby-parcel-config/-/gatsby-parcel-config-1.13.1.tgz#1d504157a2ff02227ff239bd85b0f473e41f1981" - integrity sha512-zEiDKnq1UQvDDireYQN2TmDsxDf9n2SHYXASHyUTipwsMlNMEi0nLav1vHRQAfzbjw4tabk9Z5kcknkJ6nLqOA== +gatsby-parcel-config@1.14.0: + version "1.14.0" + resolved "https://registry.yarnpkg.com/gatsby-parcel-config/-/gatsby-parcel-config-1.14.0.tgz#450a8edc6965b01a02c4d6d69f8080489dd7d317" + integrity 
sha512-S7RQOo1O5wzHxHB1AHh4xKbg8Jj76VPbSMfVsVVapL2Ht7p1zxrZ7p2pOX3pr5WJnfacREOQwLhlk8rk8TDbGw== dependencies: - "@gatsbyjs/parcel-namer-relative-to-cwd" "^2.13.1" + "@gatsbyjs/parcel-namer-relative-to-cwd" "2.14.0" "@parcel/bundler-default" "2.8.3" "@parcel/compressor-raw" "2.8.3" "@parcel/namer-default" "2.8.3" @@ -6516,41 +6956,41 @@ gatsby-parcel-config@1.13.1: "@parcel/transformer-js" "2.8.3" "@parcel/transformer-json" "2.8.3" -gatsby-plugin-image@^3.13.1: - version "3.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-image/-/gatsby-plugin-image-3.13.1.tgz#5c6e03b1c21bb0dbc076f589aea9911086b25bde" - integrity sha512-v5jGXxjr//iLk7LzpW6RW/9H4KNVezxee2Sgy9mxvdvekTuFQLYoQmtk2jOZINMZMP3Vm+Rl3MqWWVMfhHuWFw== +gatsby-plugin-image@^3.14.0: + version "3.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-image/-/gatsby-plugin-image-3.14.0.tgz#a5a67be6cd460effa0ea70a4fdcdd8baa4a0c380" + integrity sha512-sEHZUSb67yRu8YJSV/Otb3QboYma8YuePu88c2wyWFq4kK4Hgf1YsbQEWqj5ywg+ikRULbkR6TAtTpJ3waQGRg== dependencies: "@babel/code-frame" "^7.18.6" "@babel/parser" "^7.20.13" "@babel/runtime" "^7.20.13" "@babel/traverse" "^7.20.13" babel-jsx-utils "^1.1.0" - babel-plugin-remove-graphql-queries "^5.13.1" + babel-plugin-remove-graphql-queries "^5.14.0" camelcase "^6.3.0" chokidar "^3.5.3" common-tags "^1.8.2" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-plugin-utils "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" + gatsby-plugin-utils "^4.14.0" objectFitPolyfill "^2.3.5" prop-types "^15.8.1" -gatsby-plugin-manifest@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-manifest/-/gatsby-plugin-manifest-5.13.1.tgz#4e7a87c38c5d47622016eec70ce1383f6e331f60" - integrity sha512-F8zGMYz2tRDAzQO7hLrYv+xCFyIoeySeGsEk9j1KTdWB4liVQvLtFSXzj7yljyOTinDmA7mDStCiQFStC0rHZQ== +gatsby-plugin-manifest@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-manifest/-/gatsby-plugin-manifest-5.14.0.tgz#c5e701af508c534102f27c2c52b4cb41a7599bc5" + integrity sha512-ZJS+sCg8KIlXTEilInBt+kkPbGPOXX3wuRlOJiHwcou+uCmU/VZ4gif1DVazCseAbWtAdQxb3GkMlKTsGqtYiQ== dependencies: "@babel/runtime" "^7.20.13" - gatsby-core-utils "^4.13.1" - gatsby-plugin-utils "^4.13.1" + gatsby-core-utils "^4.14.0" + gatsby-plugin-utils "^4.14.0" semver "^7.5.3" sharp "^0.32.6" -gatsby-plugin-mdx@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-mdx/-/gatsby-plugin-mdx-5.13.1.tgz#9fd9d7612daefd372cb9c0f0b9ba8045f490bf3d" - integrity sha512-ZL/z1j8zBzQSqFTEoVdC+jPNpN/CXse2h87wUz78V+BMjp40ccR0DCo62KgF40HVz4iCEYVufqfjQNc0nLoSow== +gatsby-plugin-mdx@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-mdx/-/gatsby-plugin-mdx-5.14.0.tgz#5f95124cae148bd5e91559198d983c562a88b0a4" + integrity sha512-tJJmR+sRrWFRuUKUq5bJLWcLXo6FTP06tfE+7yN2foKcW6+SphY3EVvqiTnCzNAkpvUCP3EASLMg2WGWIVzi4A== dependencies: "@mdx-js/mdx" "^2.3.0" acorn "^8.8.2" @@ -6558,9 +6998,9 @@ gatsby-plugin-mdx@^5.13.1: astring "^1.8.5" deepmerge "^4.3.1" estree-util-build-jsx "^2.2.2" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-plugin-utils "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" + gatsby-plugin-utils "^4.14.0" gray-matter "^4.0.3" mdast-util-mdx "^2.0.1" mdast-util-to-hast "^10.2.0" @@ -6572,64 +7012,63 @@ gatsby-plugin-mdx@^5.13.1: unist-util-visit "^4.1.2" vfile "^5.3.7" -gatsby-plugin-page-creator@^5.13.1: - version "5.13.1" - resolved 
"https://registry.yarnpkg.com/gatsby-plugin-page-creator/-/gatsby-plugin-page-creator-5.13.1.tgz#41e844a16696e7a27d1909413a1102c314e86220" - integrity sha512-WBTQc0cFqaojS1Oh7MjBRSnLCOWWWl3L5t5LEsXwV+4L9OL6D5fLTz7K5xC34OVgfAIryuEKE/M2ZIEk3onVnw== +gatsby-plugin-page-creator@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-page-creator/-/gatsby-plugin-page-creator-5.14.0.tgz#d825a6ca7f5ee5a15490c635d509bfdfbcd552f1" + integrity sha512-H7Yk6Z+kRgOKlco6gFyj9tay+V9eeufZce+NSH6tqHq7pQ56+/X46y4XdeBsrr6wnRs0dBy+Z9rmJTD5w78c2w== dependencies: "@babel/runtime" "^7.20.13" "@babel/traverse" "^7.20.13" "@sindresorhus/slugify" "^1.1.2" chokidar "^3.5.3" fs-exists-cached "^1.0.0" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-page-utils "^3.13.1" - gatsby-plugin-utils "^4.13.1" - gatsby-telemetry "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" + gatsby-page-utils "^3.14.0" + gatsby-plugin-utils "^4.14.0" globby "^11.1.0" lodash "^4.17.21" -gatsby-plugin-postcss@^6.13.1: - version "6.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-postcss/-/gatsby-plugin-postcss-6.13.1.tgz#5fd5c83c4cc96e6f3c4cda5fad0c718cae0520ff" - integrity sha512-5b/6D2Y91nAYpPVIwVO1PgrknQc+CDHQDrM7C7/SeUcT01ghQWtLo99v5PseNiGiO3XEkps4dbuLfNR7VzZYZA== +gatsby-plugin-postcss@^6.14.0: + version "6.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-postcss/-/gatsby-plugin-postcss-6.14.0.tgz#bdf374173f4717746ae5b131b371c2182b2e39c7" + integrity sha512-cX3tVwMh/ndQEqYakp3BgpBmTKXZB8FB3csu55eq2wu+aA0XzpnDSsdYT99HzGV93FtBHvxIaF2E5LUavz52EQ== dependencies: "@babel/runtime" "^7.20.13" - postcss-loader "^7.3.3" + postcss-loader "^7.3.4" -gatsby-plugin-sharp@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-sharp/-/gatsby-plugin-sharp-5.13.1.tgz#051da57ddef3ad75a52152d3b78e3ed07589cff9" - integrity sha512-PA1LxLjZ7nKjgGykfbIxNQqrfqqfNsMN6+7wZNy5HK4Vhqjw1zDyImJEBEn6v08L2T3tlLMgR0or/OE7yo7F9A== +gatsby-plugin-sharp@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-sharp/-/gatsby-plugin-sharp-5.14.0.tgz#c3c688f9e32b6b325fee2cac9f7b7359f18ff561" + integrity sha512-Kk0hePabeuFI9wJ3a4mhtubpn/7SrALM4YlZJIOvXVYfx2mGv3SIHpAtm0YcLxi+lBKKVUPcA5uh3gNptupDTQ== dependencies: "@babel/runtime" "^7.20.13" - async "^3.2.4" + async "^3.2.5" bluebird "^3.7.2" debug "^4.3.4" filenamify "^4.3.0" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-plugin-utils "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" + gatsby-plugin-utils "^4.14.0" lodash "^4.17.21" probe-image-size "^7.2.3" semver "^7.5.3" sharp "^0.32.6" -gatsby-plugin-sitemap@^6.13.1: - version "6.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-sitemap/-/gatsby-plugin-sitemap-6.13.1.tgz#c8752b3c57ff237ad01493b1b89ab26ac5635abb" - integrity sha512-PuBcCjpwVj7NVW/GXFgmP2NLhM5FsFV618pVGCWnLSr0cPrtJsmPtLze1PMaq148EGUGnnctl/+7VLozheQ/CA== +gatsby-plugin-sitemap@^6.14.0: + version "6.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-sitemap/-/gatsby-plugin-sitemap-6.14.0.tgz#e7815fc454499409350b2d8af9a0910208da4884" + integrity sha512-pF7sv+4IdihiyMaQcXzxXrsA8XzbCwJuiDmHN8PvxrMa2v9FFwjhGXEX/dwJAOSDz98tauT7PRj8Ys4F2DfOTQ== dependencies: "@babel/runtime" "^7.20.13" common-tags "^1.8.2" minimatch "^3.1.2" sitemap "^7.1.1" -gatsby-plugin-typescript@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-typescript/-/gatsby-plugin-typescript-5.13.1.tgz#4544000239e7801eb0ab7636d7de27c18a90672d" - 
integrity sha512-FsTihoFKWTjzyIaUTndqktL39qMTAb0KyHj9kP+LxTY2FySqHIvFoYc1ycS5q52J76AoWUiGStJuuQLMNq66FQ== +gatsby-plugin-typescript@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-typescript/-/gatsby-plugin-typescript-5.14.0.tgz#3c497954af635c2ee65018b85ea00b9184be9a3b" + integrity sha512-rvdNtBKGYV0HpvZ78up39rLu5057pVTil1Z7O65MVIQyB99FGfYy96dygWOQhf9wBh0tt9oFYvaU/VMY9Gwyrg== dependencies: "@babel/core" "^7.20.12" "@babel/plugin-proposal-nullish-coalescing-operator" "^7.18.6" @@ -6637,104 +7076,86 @@ gatsby-plugin-typescript@^5.13.1: "@babel/plugin-proposal-optional-chaining" "^7.20.7" "@babel/preset-typescript" "^7.18.6" "@babel/runtime" "^7.20.13" - babel-plugin-remove-graphql-queries "^5.13.1" + babel-plugin-remove-graphql-queries "^5.14.0" -gatsby-plugin-utils@^4.13.1: - version "4.13.1" - resolved "https://registry.yarnpkg.com/gatsby-plugin-utils/-/gatsby-plugin-utils-4.13.1.tgz#9def57eea4559e1355244ebf706ce528231a510e" - integrity sha512-dQ8cZyUENWHqZOOSBBYWCJ8yG3zSYnHYk0mKQbgZblUS30Sp7ZFM4r0/+lsvUkEYaBOnzFBQjSSQtTa0xu9QWA== +gatsby-plugin-utils@^4.14.0: + version "4.14.0" + resolved "https://registry.yarnpkg.com/gatsby-plugin-utils/-/gatsby-plugin-utils-4.14.0.tgz#5ba6f5a92ba3a79d5b66ab59c8243d4504418782" + integrity sha512-w7EZ0C7JA9sG3JiBS2ffGsrZplAbtNk0Junb3UeUFj66CY0MU8UV0rZIzBkz+EMbQvPkxvVJNQu4/tA9ohCvfA== dependencies: "@babel/runtime" "^7.20.13" - fastq "^1.15.0" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - gatsby-sharp "^1.13.0" + fastq "^1.16.0" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" + gatsby-sharp "^1.14.0" graphql-compose "^9.0.10" import-from "^4.0.0" - joi "^17.9.2" + joi "^17.11.0" mime "^3.0.0" -gatsby-react-router-scroll@^6.13.1: - version "6.13.1" - resolved "https://registry.yarnpkg.com/gatsby-react-router-scroll/-/gatsby-react-router-scroll-6.13.1.tgz#b20d492a1fc0ac4f31a0435452b06e22e8cb6c41" - integrity sha512-srBpg/ZHW4miwH/4OWOcspHqr8ZmKLE4DBNvckt0KO4giJerWiGoLj6qePwLFRWZPfV7txJr2kuUzACxarpL5g== +gatsby-react-router-scroll@^6.14.0: + version "6.14.0" + resolved "https://registry.yarnpkg.com/gatsby-react-router-scroll/-/gatsby-react-router-scroll-6.14.0.tgz#55c1a21e64ca195bf9ff97e8c12a31e0c1f2ccf2" + integrity sha512-Tx+TsS2JE4BGbImgadKEfESpZeCUkQKbL5OTybNHk9T/E9zDDtECD6IRVCiC1w5LucxvrNwRww9oyeAHCZbbeg== dependencies: "@babel/runtime" "^7.20.13" prop-types "^15.8.1" -gatsby-script@^2.13.0: - version "2.13.0" - resolved "https://registry.yarnpkg.com/gatsby-script/-/gatsby-script-2.13.0.tgz#0fa7316739ebc31da217091e54db8ef86f41dbb3" - integrity sha512-TGNQGerf1NMJrgJkWxWrW6FFMAuC0L76WlyZgGXmhckPW/x7V1SxZrm0a2Q99kRHyoC59RYl2gTQWHaIwV+ZjA== +gatsby-script@^2.14.0: + version "2.14.0" + resolved "https://registry.yarnpkg.com/gatsby-script/-/gatsby-script-2.14.0.tgz#30a62ed010bfd29a8f8802a7fd0850cc4f7030a3" + integrity sha512-A3IV2cXUrXrzW39kTYJMjbvUlHRjoaZVwH0zsfPINldXw9PenIlr5uBOMWx3IWUlKIghN1rBG+sRIyNczPtxyw== -gatsby-sharp@^1.13.0: - version "1.13.0" - resolved "https://registry.yarnpkg.com/gatsby-sharp/-/gatsby-sharp-1.13.0.tgz#4d55d877ed3a5c9cd7ac45f27e13d07acb99993b" - integrity sha512-DviUtgm7tatSd1Hm54o/orHimOcyXBO9OJkSfzEchPFClvOza+2Qe/lqZShio0gFDxmG0Jgn0XCLzG7uH5VyJQ== +gatsby-sharp@^1.14.0: + version "1.14.0" + resolved "https://registry.yarnpkg.com/gatsby-sharp/-/gatsby-sharp-1.14.0.tgz#85ee5b60999d68c508199b74f445a1d369e84254" + integrity sha512-2lZg8NEg5M8jzkMYZouf0I5e1TVpwjtEiKg48R4dGOhYqDKGfENVJWRnvYtw12zNfgBgQ/gUryG7Zj7qMLVANA== dependencies: sharp "^0.32.6" -gatsby-source-filesystem@^5.13.1: - 
version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-source-filesystem/-/gatsby-source-filesystem-5.13.1.tgz#1aad8f969fbfd4efe6f7cfcd8542253f54fdbd8a" - integrity sha512-nFWzOBpi84nDeVNeO7bpKL9mVYMl1tfjJmE5l868YATFShGzZnA6qMd200XCsf78PexZHAiV/P1MlsyKqjJduA== +gatsby-source-filesystem@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-source-filesystem/-/gatsby-source-filesystem-5.14.0.tgz#f2c765f2eead173918b3d1bc92626bc770867972" + integrity sha512-uewEJaHvdxZlN6DOtZCIUuI7X5v9MRk5IVpCYy9SIZCDbnUA5siEd2A4SC+kcXxJM6AUbdvkfayqpVJrm5JaZA== dependencies: "@babel/runtime" "^7.20.13" chokidar "^3.5.3" file-type "^16.5.4" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" + fs-extra "^11.2.0" + gatsby-core-utils "^4.14.0" mime "^3.0.0" pretty-bytes "^5.6.0" valid-url "^1.0.9" xstate "^4.38.0" -gatsby-telemetry@^4.13.1: - version "4.13.1" - resolved "https://registry.yarnpkg.com/gatsby-telemetry/-/gatsby-telemetry-4.13.1.tgz#f0f80b1fb71116085931b8c6b8f48efdbeb561bc" - integrity sha512-NstKs3N8LK9rwEli6SXO+ClNmewFbVzqS2yo6XZzQSXbymH6+Kkk+eqQivKhrD8PbQLLrdXkk1p47n91zc85XQ== - dependencies: - "@babel/code-frame" "^7.18.6" - "@babel/runtime" "^7.20.13" - "@turist/fetch" "^7.2.0" - "@turist/time" "^0.0.2" - boxen "^5.1.2" - configstore "^5.0.1" - fs-extra "^11.1.1" - gatsby-core-utils "^4.13.1" - git-up "^7.0.0" - is-docker "^2.2.1" - lodash "^4.17.21" - node-fetch "^2.6.11" - -gatsby-transformer-sharp@^5.13.1: - version "5.13.1" - resolved "https://registry.yarnpkg.com/gatsby-transformer-sharp/-/gatsby-transformer-sharp-5.13.1.tgz#9822caac4566b0d13e278bb1db9bdc9c839badd3" - integrity sha512-H5gBpnKOn86ns65fv0cP8yIpnT//+dkpnSOZSrtrtYEgmwEFqblUHSSyolu2SURgE+Af55W7IKTYk2w2d9+PCQ== +gatsby-transformer-sharp@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby-transformer-sharp/-/gatsby-transformer-sharp-5.14.0.tgz#f94d1cbee764127a99eab6a4bb204a948faf5a46" + integrity sha512-U4Z3t6JBKgM1QSpoicMsUzD5+BJGdO1bXW4b09M1Ze46B86gIBZFtSoH57PI3pARLjx0TmA9aoADbAo1B0jWPw== dependencies: "@babel/runtime" "^7.20.13" bluebird "^3.7.2" common-tags "^1.8.2" - fs-extra "^11.1.1" - gatsby-plugin-utils "^4.13.1" + fs-extra "^11.2.0" + gatsby-plugin-utils "^4.14.0" probe-image-size "^7.2.3" semver "^7.5.3" sharp "^0.32.6" -gatsby-worker@^2.13.1: - version "2.13.1" - resolved "https://registry.yarnpkg.com/gatsby-worker/-/gatsby-worker-2.13.1.tgz#75b1881642149058d68559fa32c0ed2bc1f88f5c" - integrity sha512-CEm+5M2+3PzPcqTaJ0xjNuorQ3d1PfnKG1yqRRPfw8LFdPiycGhZAug/jqx0iTyVWXFHakEmddlykGFEbq54/Q== +gatsby-worker@^2.14.0: + version "2.14.0" + resolved "https://registry.yarnpkg.com/gatsby-worker/-/gatsby-worker-2.14.0.tgz#57965ab2ecafed9c7024e72bd97de91d2773e098" + integrity sha512-a5DjKgC9mjhfLhyQO6ZX9tuKoDY8KkJrfBg3g0GHyh8hCzlxYvnOc+URGBG/PmF+7yNBiNjA1dENKKmD0fBWjw== dependencies: "@babel/core" "^7.20.12" "@babel/runtime" "^7.20.13" - fs-extra "^11.1.1" + fs-extra "^11.2.0" signal-exit "^3.0.7" -gatsby@^5.13.7: - version "5.13.7" - resolved "https://registry.yarnpkg.com/gatsby/-/gatsby-5.13.7.tgz#51b90f907ec230c1555571e54087aca4434087e4" - integrity sha512-slQ0Ky+5ev5dMV6svAgQYWRw3lztTcj6FhmnGvFPWSwrE/L+2TlQG0Izjm8DRoqzQVBp0z+hkCEiRVF7IdzLCw== +gatsby@^5.14.0: + version "5.14.0" + resolved "https://registry.yarnpkg.com/gatsby/-/gatsby-5.14.0.tgz#b3a9e0eef67adbcbbb594f5e5dc26e308fa7d404" + integrity sha512-VZZAN80FrcGLz0uOPK7Eww/tfGUQ26UuZOxiXFhcJgeO29lt8IPwSdNhAimyhbRDkvMNZu7Dz1+tO9PyvPZN4Q== dependencies: "@babel/code-frame" "^7.18.6" "@babel/core" "^7.20.12" @@ -6769,16 
+7190,16 @@ gatsby@^5.13.7: address "1.2.2" anser "^2.1.1" autoprefixer "^10.4.14" - axios "^0.21.1" + axios "^1.6.4" babel-jsx-utils "^1.1.0" babel-loader "^8.3.0" babel-plugin-add-module-exports "^1.0.4" babel-plugin-dynamic-import-node "^2.3.3" - babel-plugin-remove-graphql-queries "^5.13.1" - babel-preset-gatsby "^3.13.2" + babel-plugin-remove-graphql-queries "^5.14.0" + babel-preset-gatsby "^3.14.0" better-opn "^2.1.1" bluebird "^3.7.2" - body-parser "1.20.1" + body-parser "1.20.3" browserslist "^4.21.9" cache-manager "^2.11.1" chalk "^4.1.2" @@ -6803,7 +7224,7 @@ gatsby@^5.13.7: eslint-config-react-app "^6.0.0" eslint-plugin-flowtype "^5.10.0" eslint-plugin-import "^2.27.5" - eslint-plugin-jsx-a11y "^6.7.1" + eslint-plugin-jsx-a11y "^6.8.0" eslint-plugin-react "^7.32.2" eslint-plugin-react-hooks "^4.6.0" eslint-webpack-plugin "^2.7.0" @@ -6816,21 +7237,20 @@ gatsby@^5.13.7: file-loader "^6.2.0" find-cache-dir "^3.3.2" fs-exists-cached "1.0.0" - fs-extra "^11.1.1" - gatsby-cli "^5.13.3" - gatsby-core-utils "^4.13.1" - gatsby-graphiql-explorer "^3.13.1" - gatsby-legacy-polyfills "^3.13.1" - gatsby-link "^5.13.1" - gatsby-page-utils "^3.13.1" - gatsby-parcel-config "1.13.1" - gatsby-plugin-page-creator "^5.13.1" - gatsby-plugin-typescript "^5.13.1" - gatsby-plugin-utils "^4.13.1" - gatsby-react-router-scroll "^6.13.1" - gatsby-script "^2.13.0" - gatsby-telemetry "^4.13.1" - gatsby-worker "^2.13.1" + fs-extra "^11.2.0" + gatsby-cli "^5.14.0" + gatsby-core-utils "^4.14.0" + gatsby-graphiql-explorer "^3.14.0" + gatsby-legacy-polyfills "^3.14.0" + gatsby-link "^5.14.0" + gatsby-page-utils "^3.14.0" + gatsby-parcel-config "1.14.0" + gatsby-plugin-page-creator "^5.14.0" + gatsby-plugin-typescript "^5.14.0" + gatsby-plugin-utils "^4.14.0" + gatsby-react-router-scroll "^6.14.0" + gatsby-script "^2.14.0" + gatsby-worker "^2.14.0" glob "^7.2.3" globby "^11.1.0" got "^11.8.6" @@ -6863,7 +7283,7 @@ gatsby@^5.13.7: opentracing "^0.14.7" p-defer "^3.0.0" parseurl "^1.3.3" - path-to-regexp "0.1.7" + path-to-regexp "0.1.10" physical-cpu-count "^2.0.0" platform "^1.3.6" postcss "^8.4.24" @@ -6897,14 +7317,14 @@ gatsby@^5.13.7: url-loader "^4.1.1" uuid "^8.3.2" webpack "^5.88.1" - webpack-dev-middleware "^4.3.0" + webpack-dev-middleware "^5.3.4" webpack-merge "^5.9.0" webpack-stats-plugin "^1.1.3" webpack-virtual-modules "^0.5.0" xstate "^4.38.0" yaml-loader "^0.8.0" optionalDependencies: - gatsby-sharp "^1.13.0" + gatsby-sharp "^1.14.0" gensync@^1.0.0-beta.2: version "1.0.0-beta.2" @@ -6958,14 +7378,6 @@ get-symbol-description@^1.0.2: es-errors "^1.3.0" get-intrinsic "^1.2.4" -git-up@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/git-up/-/git-up-7.0.0.tgz#bace30786e36f56ea341b6f69adfd83286337467" - integrity sha512-ONdIrbBCFusq1Oy0sC71F5azx8bVkvtZtMJAsv+a6lz5YAmbNnLD6HAB4gptHZVLPR8S2/kVN6Gab7lryq5+lQ== - dependencies: - is-ssh "^1.4.0" - parse-url "^8.1.0" - github-from-package@0.0.0: version "0.0.0" resolved "https://registry.yarnpkg.com/github-from-package/-/github-from-package-0.0.0.tgz#97fb5d96bfde8973313f20e8288ef9a167fa64ce" @@ -7128,6 +7540,13 @@ graphemer@^1.4.0: resolved "https://registry.yarnpkg.com/graphemer/-/graphemer-1.4.0.tgz#fb2f1d55e0e3a1849aeffc90c4fa0dd53a0e66c6" integrity sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag== +graphlib@^2.1.8: + version "2.1.8" + resolved "https://registry.yarnpkg.com/graphlib/-/graphlib-2.1.8.tgz#5761d414737870084c92ec7b5dbcb0592c9d35da" + integrity 
sha512-jcLLfkpoVGmH7/InMC/1hIvOPSUh38oJtGhvrOFGzioE1DZ+0YW16RgmOJhHiuWTvGiJQ9Z1Ik43JvkRPRvE+A== + dependencies: + lodash "^4.17.15" + graphql-compose@^9.0.10: version "9.0.11" resolved "https://registry.yarnpkg.com/graphql-compose/-/graphql-compose-9.0.11.tgz#c3b5a3378b62be0deb9ea43d147b756010f6d611" @@ -7225,7 +7644,7 @@ has@~1.0.3: resolved "https://registry.yarnpkg.com/has/-/has-1.0.4.tgz#2eb2860e000011dae4f1406a86fe80e530fb2ec6" integrity sha512-qdSAmqLF6209RFj4VVItywPMbm3vWylknmB3nvNiUIs72xAimcM8nVYxYr7ncvZq5qzk9MKIZR8ijqD/1QuYjQ== -hash-wasm@^4.9.0: +hash-wasm@^4.11.0: version "4.11.0" resolved "https://registry.yarnpkg.com/hash-wasm/-/hash-wasm-4.11.0.tgz#7d1479b114c82e48498fdb1d2462a687d00386d5" integrity sha512-HVusNXlVqHe0fzIzdQOGolnFN6mX/fqcrSAOcTBXdvzrXVHwTz11vXeKRmkR5gTuwVpvHZEIyKoePDvuAR+XwQ== @@ -7438,6 +7857,11 @@ http2-wrapper@^2.1.10: quick-lru "^5.1.1" resolve-alpn "^1.2.0" +hull.js@^1.0.6: + version "1.0.6" + resolved "https://registry.yarnpkg.com/hull.js/-/hull.js-1.0.6.tgz#75f013e8171eb9a871b4a94887e89eb555461d0e" + integrity sha512-TC7e9sHYOaCVms0sn2hN7buxnaGfcl9h5EPVoVX9DTPoMpqQiS9bf3tmGDgiNaMVHBD91RAvWjCxrJ5Jx8BI5A== + human-signals@^2.1.0: version "2.1.0" resolved "https://registry.yarnpkg.com/human-signals/-/human-signals-2.1.0.tgz#dc91fcba42e4d06e4abaed33b3e7a3c02f514ea0" @@ -7614,6 +8038,11 @@ is-alphanumerical@^2.0.0: is-alphabetical "^2.0.0" is-decimal "^2.0.0" +is-any-array@^2.0.0, is-any-array@^2.0.1: + version "2.0.1" + resolved "https://registry.yarnpkg.com/is-any-array/-/is-any-array-2.0.1.tgz#9233242a9c098220290aa2ec28f82ca7fa79899e" + integrity sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ== + is-arguments@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/is-arguments/-/is-arguments-1.1.1.tgz#15b3f88fda01f2a97fec84ca761a560f123efa9b" @@ -7717,7 +8146,7 @@ is-decimal@^2.0.0: resolved "https://registry.yarnpkg.com/is-decimal/-/is-decimal-2.0.1.tgz#9469d2dc190d0214fd87d78b78caecc0cc14eef7" integrity sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A== -is-docker@^2.0.0, is-docker@^2.1.1, is-docker@^2.2.1: +is-docker@^2.0.0, is-docker@^2.1.1: version "2.2.1" resolved "https://registry.yarnpkg.com/is-docker/-/is-docker-2.2.1.tgz#33eeabe23cfe86f14bde4408a02c0cfb853acdaa" integrity sha512-F+i2BKsFrH66iaUFc0woD8sLy8getkwTwtOBjvs56Cx4CgJDeKQeqfz8wAYiSb8JOprWhHH5p77PbmYCvvUuXQ== @@ -7891,13 +8320,6 @@ is-shared-array-buffer@^1.0.2, is-shared-array-buffer@^1.0.3: dependencies: call-bind "^1.0.7" -is-ssh@^1.4.0: - version "1.4.0" - resolved "https://registry.yarnpkg.com/is-ssh/-/is-ssh-1.4.0.tgz#4f8220601d2839d8fa624b3106f8e8884f01b8b2" - integrity sha512-x7+VxdxOdlV3CYpjvRLBv5Lo9OJerlYanjwFrPR9fuGPjCiNiCzFgAWpiLAohSbsnH4ZAys3SBh+hq5rJosxUQ== - dependencies: - protocols "^2.0.1" - is-stream@^2.0.0: version "2.0.1" resolved "https://registry.yarnpkg.com/is-stream/-/is-stream-2.0.1.tgz#fac1e3d53b97ad5a9d0ae9cef2389f5810a5c077" @@ -8076,7 +8498,7 @@ jiti@^1.20.0, jiti@^1.21.0: resolved "https://registry.yarnpkg.com/jiti/-/jiti-1.21.6.tgz#6c7f7398dd4b3142767f9a168af2f317a428d268" integrity sha512-2yTgeWTWzMWkHu6Jp9NKgePDaYHbntiwvYuuJLbbN9vl7DC9DvXKOB2BC3ZZ92D3cvV/aflH0osDfwpHepQ53w== -joi@^17.9.2: +joi@^17.11.0, joi@^17.9.2: version "17.13.3" resolved "https://registry.yarnpkg.com/joi/-/joi-17.13.3.tgz#0f5cc1169c999b30d344366d384b12d92558bcec" integrity sha512-otDA4ldcIx+ZXsKHWmp0YizCweVRZG96J10b0FevjfuncLO1oX59THoAmHkNubYJ+9gWsYsp5k8v4ib6oDv1fA== @@ -8505,10 
+8927,10 @@ lru-queue@^0.1.0: dependencies: es5-ext "~0.10.2" -lucide-react@^0.454.0: - version "0.454.0" - resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.454.0.tgz#a81b9c482018720f07ead0503ae502d94d528444" - integrity sha512-hw7zMDwykCLnEzgncEEjHeA6+45aeEzRYuKHuyRSOPkhko+J3ySGjGIzu+mmMfDFG1vazHepMaYFYHbTFAZAAQ== +lucide-react@^0.456.0: + version "0.456.0" + resolved "https://registry.yarnpkg.com/lucide-react/-/lucide-react-0.456.0.tgz#14906c3355cc65d3380b7b2294b331aeda1bb392" + integrity sha512-DIIGJqTT5X05sbAsQ+OhA8OtJYyD4NsEMCA/HQW/Y6ToPQ7gwbtujIoeAaup4HpHzV35SQOarKAWH8LYglB6eA== make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0: version "3.1.0" @@ -8517,13 +8939,6 @@ make-dir@^3.0.0, make-dir@^3.0.2, make-dir@^3.1.0: dependencies: semver "^6.0.0" -map-age-cleaner@^0.1.3: - version "0.1.3" - resolved "https://registry.yarnpkg.com/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz#7d583a7306434c055fe474b0f45078e6e1b4b92a" - integrity sha512-bJzx6nMoP6PDLPBFmg7+xRKeFZvFboMrGlxmNj9ClvX53KrmvM5bXFXEWjbz4cz1AFn+jWJ9z/DJSz7hrs0w3w== - dependencies: - p-defer "^1.0.0" - map-cache@^0.2.0: version "0.2.2" resolved "https://registry.yarnpkg.com/map-cache/-/map-cache-0.2.2.tgz#c32abd0bd6525d9b051645bb4f26ac5dc98a0dbf" @@ -8814,15 +9229,7 @@ media-typer@0.3.0: resolved "https://registry.yarnpkg.com/media-typer/-/media-typer-0.3.0.tgz#8710d7af0aa626f8fffa1ce00168545263255748" integrity sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ== -mem@^8.1.1: - version "8.1.1" - resolved "https://registry.yarnpkg.com/mem/-/mem-8.1.1.tgz#cf118b357c65ab7b7e0817bdf00c8062297c0122" - integrity sha512-qFCFUDs7U3b8mBDPyz5EToEKoAkgCzqquIgi9nkkR9bixxOVOre+09lbuH7+9Kn2NFpm56M3GUWVbU2hQgdACA== - dependencies: - map-age-cleaner "^0.1.3" - mimic-fn "^3.1.0" - -memfs@^3.1.2, memfs@^3.2.2: +memfs@^3.1.2, memfs@^3.4.3: version "3.6.0" resolved "https://registry.yarnpkg.com/memfs/-/memfs-3.6.0.tgz#d7a2110f86f79dd950a8b6df6d57bc984aa185f6" integrity sha512-EGowvkkgbMcIChjMTMkESFDbZeSh8xZ7kNSF0hAiAN4Jh6jgHCRS0Ga/+C8y6Au+oqpezRHCfPsmJ2+DwAgiwQ== @@ -9363,7 +9770,7 @@ mime-db@1.52.0: resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.53.0.tgz#3cb63cd820fc29896d9d4e8c32ab4fcd74ccb447" integrity sha512-oHlN/w+3MQ3rba9rqFr6V/ypF10LSkdwUysQL7GkXoTgIWeV+tcXGA852TBxH+gsh8UWoyhR1hKcoMJTuWflpg== -mime-types@^2.1.12, mime-types@^2.1.27, mime-types@^2.1.30, mime-types@~2.1.24, mime-types@~2.1.34: +mime-types@^2.1.12, mime-types@^2.1.27, mime-types@^2.1.31, mime-types@~2.1.24, mime-types@~2.1.34: version "2.1.35" resolved "https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== @@ -9385,11 +9792,6 @@ mimic-fn@^2.1.0: resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-2.1.0.tgz#7ed2c2ccccaf84d3ffcb7a69b57711fc2083401b" integrity sha512-OqbOk5oEQeAZ8WXWydlu9HJjz9WVdEIvamMCcXmuqUYjTknH/sqsWvhQ3vgwKFRR1HpjvNBKQ37nbJgYzGqGcg== -mimic-fn@^3.1.0: - version "3.1.0" - resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-3.1.0.tgz#65755145bbf3e36954b949c16450427451d5ca74" - integrity sha512-Ysbi9uYW9hFyfrThdDEQuykN4Ey6BuwPD2kpI5ES/nFTDn/98yxYNLZJcgUAKPT/mcrLLKaGzJR9YVxJrIdASQ== - mimic-fn@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/mimic-fn/-/mimic-fn-4.0.0.tgz#60a90550d5cb0b239cca65d893b1a53b29871ecc" @@ -9460,6 +9862,37 @@ mkdirp@^0.5.1, mkdirp@^0.5.4: dependencies: minimist "^1.2.6" 
+ml-array-max@^1.2.4: + version "1.2.4" + resolved "https://registry.yarnpkg.com/ml-array-max/-/ml-array-max-1.2.4.tgz#2373e2b7e51c8807e456cc0ef364c5863713623b" + integrity sha512-BlEeg80jI0tW6WaPyGxf5Sa4sqvcyY6lbSn5Vcv44lp1I2GR6AWojfUvLnGTNsIXrZ8uqWmo8VcG1WpkI2ONMQ== + dependencies: + is-any-array "^2.0.0" + +ml-array-min@^1.2.3: + version "1.2.3" + resolved "https://registry.yarnpkg.com/ml-array-min/-/ml-array-min-1.2.3.tgz#662f027c400105816b849cc3cd786915d0801495" + integrity sha512-VcZ5f3VZ1iihtrGvgfh/q0XlMobG6GQ8FsNyQXD3T+IlstDv85g8kfV0xUG1QPRO/t21aukaJowDzMTc7j5V6Q== + dependencies: + is-any-array "^2.0.0" + +ml-array-rescale@^1.3.7: + version "1.3.7" + resolved "https://registry.yarnpkg.com/ml-array-rescale/-/ml-array-rescale-1.3.7.tgz#c4d129320d113a732e62dd963dc1695bba9a5340" + integrity sha512-48NGChTouvEo9KBctDfHC3udWnQKNKEWN0ziELvY3KG25GR5cA8K8wNVzracsqSW1QEkAXjTNx+ycgAv06/1mQ== + dependencies: + is-any-array "^2.0.0" + ml-array-max "^1.2.4" + ml-array-min "^1.2.3" + +ml-matrix@^6.10.4: + version "6.12.0" + resolved "https://registry.yarnpkg.com/ml-matrix/-/ml-matrix-6.12.0.tgz#def6a0574b5fdc54a753033830e784a17399e270" + integrity sha512-AGfR+pWaC0GmzjUnB6BfwhndPEUGz0i7QUYdqNuw1zhTov/vSRJ9pP2hs6BoGpaSbtXgrKjZz2zjD1M0xuur6A== + dependencies: + is-any-array "^2.0.1" + ml-array-rescale "^1.3.7" + mock-property@~1.0.0: version "1.0.3" resolved "https://registry.yarnpkg.com/mock-property/-/mock-property-1.0.3.tgz#3e37c50a56609d548cabd56559fde3dd8767b10c" @@ -9899,11 +10332,6 @@ p-cancelable@^3.0.0: resolved "https://registry.yarnpkg.com/p-cancelable/-/p-cancelable-3.0.0.tgz#63826694b54d61ca1c20ebcb6d3ecf5e14cd8050" integrity sha512-mlVgR3PGuzlo0MmTdk4cXqXWlwQDLnONTAg6sm62XkMJEiRxN3GL3SffkYvqwonbkJBcrI7Uvv5Zh9yjvn2iUw== -p-defer@^1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-1.0.0.tgz#9f6eb182f6c9aa8cd743004a7d4f96b196b0fb0c" - integrity sha512-wB3wfAxZpk2AzOfUMJNL+d36xothRSyj8EXOa4f6GMqYDN9BJaaSISbsk+wS9abmnebVw95C2Kb5t85UmpCxuw== - p-defer@^3.0.0: version "3.0.0" resolved "https://registry.yarnpkg.com/p-defer/-/p-defer-3.0.0.tgz#d1dceb4ee9b2b604b1d94ffec83760175d4e6f83" @@ -10012,20 +10440,6 @@ parse-json@^5.0.0, parse-json@^5.2.0: json-parse-even-better-errors "^2.3.0" lines-and-columns "^1.1.6" -parse-path@^7.0.0: - version "7.0.0" - resolved "https://registry.yarnpkg.com/parse-path/-/parse-path-7.0.0.tgz#605a2d58d0a749c8594405d8cc3a2bf76d16099b" - integrity sha512-Euf9GG8WT9CdqwuWJGdf3RkUcTBArppHABkO7Lm8IzRQp0e2r/kkFnmhu4TSK30Wcu5rVAZLmfPKSBBi9tWFog== - dependencies: - protocols "^2.0.0" - -parse-url@^8.1.0: - version "8.1.0" - resolved "https://registry.yarnpkg.com/parse-url/-/parse-url-8.1.0.tgz#972e0827ed4b57fc85f0ea6b0d839f0d8a57a57d" - integrity sha512-xDvOoLU5XRrcOZvnI6b8zA6n9O9ejNk/GExuz1yBuWUGn9KA97GI6HTs6u02wKara1CeVmZhH+0TZFdWScR89w== - dependencies: - parse-path "^7.0.0" - parseurl@^1.3.3, parseurl@~1.3.3: version "1.3.3" resolved "https://registry.yarnpkg.com/parseurl/-/parseurl-1.3.3.tgz#9da19e7bee8d12dff0513ed5b76957793bc2e8d4" @@ -10110,11 +10524,6 @@ path-to-regexp@0.1.10: resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.10.tgz#67e9108c5c0551b9e5326064387de4763c4d5f8b" integrity sha512-7lf7qcQidTku0Gu3YDPc8DJ1q7OOucfa/BSsIwjuh56VU7katFvuM8hULfkwB3Fns/rsVF7PwPKVw1sl5KQS9w== -path-to-regexp@0.1.7: - version "0.1.7" - resolved "https://registry.yarnpkg.com/path-to-regexp/-/path-to-regexp-0.1.7.tgz#df604178005f522f15eb4490e7247a1bfaa67f8c" - integrity 
sha512-5DFkuoqlv1uYQKxy8omFBeJPQcdoE07Kv2sferDCrAq1ohOU+MSDswDIbnx3YAM60qIOnYa53wBhXW0EbMonrQ== - path-type@^4.0.0: version "4.0.0" resolved "https://registry.yarnpkg.com/path-type/-/path-type-4.0.0.tgz#84ed01c0a7ba380afe09d90a8c180dcd9d03043b" @@ -10144,7 +10553,7 @@ physical-cpu-count@^2.0.0: resolved "https://registry.yarnpkg.com/physical-cpu-count/-/physical-cpu-count-2.0.0.tgz#18de2f97e4bf7a9551ad7511942b5496f7aba660" integrity sha512-rxJOljMuWtYlvREBmd6TZYanfcPhNUKtGDZBjBBS8WG1dpN2iwPsRJZgQqN/OtJuiQckdRFOfzogqJClTrsi7g== -picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0: +picocolors@^1.0.0, picocolors@^1.0.1, picocolors@^1.1.0, picocolors@^1.1.1: version "1.1.1" resolved "https://registry.yarnpkg.com/picocolors/-/picocolors-1.1.1.tgz#3d321af3eab939b083c8f929a1d12cda81c26b6b" integrity sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA== @@ -10272,7 +10681,7 @@ postcss-loader@^5.3.0: klona "^2.0.4" semver "^7.3.4" -postcss-loader@^7.3.3: +postcss-loader@^7.3.4: version "7.3.4" resolved "https://registry.yarnpkg.com/postcss-loader/-/postcss-loader-7.3.4.tgz#aed9b79ce4ed7e9e89e56199d25ad1ec8f606209" integrity sha512-iW5WTTBSC5BfsBJ9daFMPVrLT36MrNiC6fqOZTTaHjBNX6Pfd5p+hSBqe/fEeNd7pc13QiAyGt7VdGMw4eRC4A== @@ -10483,12 +10892,21 @@ postcss-unique-selectors@^5.1.1: dependencies: postcss-selector-parser "^6.0.5" -postcss-value-parser@^4.0.0, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: +postcss-value-parser@^4.0.0, postcss-value-parser@^4.0.2, postcss-value-parser@^4.1.0, postcss-value-parser@^4.2.0: version "4.2.0" resolved "https://registry.yarnpkg.com/postcss-value-parser/-/postcss-value-parser-4.2.0.tgz#723c09920836ba6d3e5af019f92bc0971c02e514" integrity sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ== -postcss@^8.2.15, postcss@^8.2.9, postcss@^8.4.23, postcss@^8.4.24, postcss@^8.4.47: +postcss@8.4.38: + version "8.4.38" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.38.tgz#b387d533baf2054288e337066d81c6bee9db9e0e" + integrity sha512-Wglpdk03BSfXkHoQa3b/oulrotAkwrlLDRSOb9D0bN86FdRyE9lppSp33aHNPgBa0JKCoB+drFLZkQoRRYae5A== + dependencies: + nanoid "^3.3.7" + picocolors "^1.0.0" + source-map-js "^1.2.0" + +postcss@^8.2.15, postcss@^8.2.9, postcss@^8.4.23, postcss@^8.4.24: version "8.4.47" resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.47.tgz#5bf6c9a010f3e724c503bf03ef7947dcb0fea365" integrity sha512-56rxCq7G/XfB4EkXq9Egn5GCqugWvDFjafDOThIdMBsI15iqPqR5r15TfSr1YPYeEI19YeaXMCbY6u88Y76GLQ== @@ -10497,6 +10915,15 @@ postcss@^8.2.15, postcss@^8.2.9, postcss@^8.4.23, postcss@^8.4.24, postcss@^8.4. 
picocolors "^1.1.0" source-map-js "^1.2.1" +postcss@^8.4.49: + version "8.4.49" + resolved "https://registry.yarnpkg.com/postcss/-/postcss-8.4.49.tgz#4ea479048ab059ab3ae61d082190fabfd994fe19" + integrity sha512-OCVPnIObs4N29kxTjzLfUryOkvZEq+pf8jTF0lg8E7uETuWHA+v7j3c/xJmiqpX450191LlmZfUKkXxkTry7nA== + dependencies: + nanoid "^3.3.7" + picocolors "^1.1.1" + source-map-js "^1.2.1" + prebuild-install@^7.1.1: version "7.1.2" resolved "https://registry.yarnpkg.com/prebuild-install/-/prebuild-install-7.1.2.tgz#a5fd9986f5a6251fbc47e1e5c65de71e68c0a056" @@ -10595,11 +11022,6 @@ proto-list@~1.2.1: resolved "https://registry.yarnpkg.com/proto-list/-/proto-list-1.2.4.tgz#212d5bfe1318306a420f6402b8e26ff39647a849" integrity sha512-vtK/94akxsTMhe0/cbfpR+syPuszcuwhqVjJq26CuNDgFGj682oRBXOP5MJpv2r7JtE8MsiepGIqvvOTBwn2vA== -protocols@^2.0.0, protocols@^2.0.1: - version "2.0.1" - resolved "https://registry.yarnpkg.com/protocols/-/protocols-2.0.1.tgz#8f155da3fc0f32644e83c5782c8e8212ccf70a86" - integrity sha512-/XJ368cyBJ7fzLMwLKv1e4vLxOju2MNAIokcr7meSaNcVbWz/CPcW22cP04mwxOErdA5mwjA8Q6w/cdAQxVn7Q== - proxy-addr@~2.0.7: version "2.0.7" resolved "https://registry.yarnpkg.com/proxy-addr/-/proxy-addr-2.0.7.tgz#f19fe69ceab311eeb94b42e70e8c2070f9ba1025" @@ -10608,6 +11030,11 @@ proxy-addr@~2.0.7: forwarded "0.2.0" ipaddr.js "1.9.1" +proxy-from-env@^1.1.0: + version "1.1.0" + resolved "https://registry.yarnpkg.com/proxy-from-env/-/proxy-from-env-1.1.0.tgz#e102f16ca355424865755d2c9e8ea4f24d58c3e2" + integrity sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg== + pseudomap@^1.0.1: version "1.0.2" resolved "https://registry.yarnpkg.com/pseudomap/-/pseudomap-1.0.2.tgz#f052a28da70e618917ef0a8ac34c1ae5a68286b3" @@ -10626,13 +11053,6 @@ punycode@^2.1.0, punycode@^2.1.1: resolved "https://registry.yarnpkg.com/punycode/-/punycode-2.3.1.tgz#027422e2faec0b25e1549c3e1bd8309b9133b6e5" integrity sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg== -qs@6.11.0: - version "6.11.0" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.0.tgz#fd0d963446f7a65e1367e01abd85429453f0c37a" - integrity sha512-MvjoMCJwEarSbUYk5O+nmoSzSutSsTwF85zcHPQ9OrlFoZOYIjaqBAJIqIXjptyD5vThxGq52Xu/MaJzRkIk4Q== - dependencies: - side-channel "^1.0.4" - qs@6.13.0: version "6.13.0" resolved "https://registry.yarnpkg.com/qs/-/qs-6.13.0.tgz#6ca3bd58439f7e245655798997787b0d88a51906" @@ -10682,16 +11102,6 @@ range-parser@^1.2.1, range-parser@~1.2.1: resolved "https://registry.yarnpkg.com/range-parser/-/range-parser-1.2.1.tgz#3cf37023d199e1c24d1a55b84800c2f3e6468031" integrity sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg== -raw-body@2.5.1: - version "2.5.1" - resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.1.tgz#fe1b1628b181b700215e5fd42389f98b71392857" - integrity sha512-qqJBtEyVgS0ZmPGdCFPWJ3FreoqvG4MVQln/kCgF7Olq95IbOp0/BWyMwbdtn4VTvkM8Y7khCQ2Xgk/tcrCXig== - dependencies: - bytes "3.1.2" - http-errors "2.0.0" - iconv-lite "0.4.24" - unpipe "1.0.0" - raw-body@2.5.2, raw-body@^2.3.0: version "2.5.2" resolved "https://registry.yarnpkg.com/raw-body/-/raw-body-2.5.2.tgz#99febd83b90e08975087e8f1f9419a149366b68a" @@ -10717,17 +11127,16 @@ rbush@^3.0.1: dependencies: quickselect "^2.0.0" -rc-cascader@~3.28.2: - version "3.28.2" - resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-3.28.2.tgz#91720d3498261a7bff9fffc953501a8830f601fb" - integrity 
sha512-8f+JgM83iLTvjgdkgU7GfI4qY8icXOBP0cGZjOdx2iJAkEe8ucobxDQAVE69UD/c3ehCxZlcgEHeD5hFmypbUw== +rc-cascader@~3.30.0: + version "3.30.0" + resolved "https://registry.yarnpkg.com/rc-cascader/-/rc-cascader-3.30.0.tgz#da3e35cadcc00c58c62a6757eca6c7147ff94ea8" + integrity sha512-rrzSbk1Bdqbu+pDwiLCLHu72+lwX9BZ28+JKzoi0DWZ4N29QYFeip8Gctl33QVd2Xg3Rf14D3yAOG76ElJw16w== dependencies: - "@babel/runtime" "^7.12.5" - array-tree-filter "^2.1.0" + "@babel/runtime" "^7.25.7" classnames "^2.3.1" - rc-select "~14.15.0" - rc-tree "~5.9.0" - rc-util "^5.37.0" + rc-select "~14.16.2" + rc-tree "~5.10.1" + rc-util "^5.43.0" rc-checkbox@~3.3.0: version "3.3.0" @@ -10738,10 +11147,10 @@ rc-checkbox@~3.3.0: classnames "^2.3.2" rc-util "^5.25.2" -rc-collapse@~3.8.0: - version "3.8.0" - resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-3.8.0.tgz#02bcf81e1601aa185cd3b9fab0ceefd8dc11aefb" - integrity sha512-YVBkssrKPBG09TGfcWWGj8zJBYD9G3XuTy89t5iUmSXrIXEAnO1M+qjUxRW6b4Qi0+wNWG6MHJF/+US+nmIlzA== +rc-collapse@~3.9.0: + version "3.9.0" + resolved "https://registry.yarnpkg.com/rc-collapse/-/rc-collapse-3.9.0.tgz#972404ce7724e1c9d1d2476543e1175404a36806" + integrity sha512-swDdz4QZ4dFTo4RAUMLL50qP0EY62N2kvmk2We5xYdRwcRn8WcYtuetCJpwpaCbUfUt5+huLpVxhvmnK+PHrkA== dependencies: "@babel/runtime" "^7.10.1" classnames "2.x" @@ -10780,10 +11189,10 @@ rc-dropdown@~4.2.0: classnames "^2.2.6" rc-util "^5.17.0" -rc-field-form@~2.4.0: - version "2.4.0" - resolved "https://registry.yarnpkg.com/rc-field-form/-/rc-field-form-2.4.0.tgz#26997160d12ae43a94c356c1290bfc011c69b3ca" - integrity sha512-XZ/lF9iqf9HXApIHQHqzJK5v2w4mkUMsVqAzOyWVzoiwwXEavY6Tpuw7HavgzIoD+huVff4JghSGcgEfX6eycg== +rc-field-form@~2.5.0: + version "2.5.1" + resolved "https://registry.yarnpkg.com/rc-field-form/-/rc-field-form-2.5.1.tgz#30f3c529f86aec6af27589052df9c66cec94ceb4" + integrity sha512-33hunXwynQJyeae7LS3hMGTXNeRBjiPyPYgB0824EbmLHiXC1EBGyUwRh6xjLRy9c+en5WARYN0gJz5+JAqwig== dependencies: "@babel/runtime" "^7.18.0" "@rc-component/async-validator" "^5.0.3" @@ -10801,10 +11210,10 @@ rc-image@~7.11.0: rc-motion "^2.6.2" rc-util "^5.34.1" -rc-input-number@~9.2.0: - version "9.2.0" - resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-9.2.0.tgz#7e9344ff054421d2bfff0eebd7c1b8ef22d12220" - integrity sha512-5XZFhBCV5f9UQ62AZ2hFbEY8iZT/dm23Q1kAg0H8EvOgD3UDbYYJAayoVIkM3lQaCqYAW5gV0yV3vjw1XtzWHg== +rc-input-number@~9.3.0: + version "9.3.0" + resolved "https://registry.yarnpkg.com/rc-input-number/-/rc-input-number-9.3.0.tgz#3403c1071fcb9dbf91073faddd80ea83bd3974df" + integrity sha512-JQ363ywqRyxwgVxpg2z2kja3CehTpYdqR7emJ/6yJjRdbvo+RvfE83fcpBCIJRq3zLp8SakmEXq60qzWyZ7Usw== dependencies: "@babel/runtime" "^7.10.1" "@rc-component/mini-decimal" "^1.0.1" @@ -10821,23 +11230,23 @@ rc-input@~1.6.0, rc-input@~1.6.3: classnames "^2.2.1" rc-util "^5.18.1" -rc-mentions@~2.16.1: - version "2.16.1" - resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-2.16.1.tgz#5e54ebe3ce6cd79838846ff1c8cfaf2e7aa15cec" - integrity sha512-GnhSTGP9Mtv6pqFFGQze44LlrtWOjHNrUUAcsdo9DnNAhN4pwVPEWy4z+2jpjkiGlJ3VoXdvMHcNDQdfI9fEaw== +rc-mentions@~2.17.0: + version "2.17.0" + resolved "https://registry.yarnpkg.com/rc-mentions/-/rc-mentions-2.17.0.tgz#d16dd5c8e4db87862c1007f7195b0aea9247cdcd" + integrity sha512-sfHy+qLvc+p8jx8GUsujZWXDOIlIimp6YQz7N5ONQ6bHsa2kyG+BLa5k2wuxgebBbH97is33wxiyq5UkiXRpHA== dependencies: "@babel/runtime" "^7.22.5" "@rc-component/trigger" "^2.0.0" classnames "^2.2.6" rc-input "~1.6.0" - rc-menu "~9.15.1" + rc-menu "~9.16.0" rc-textarea 
"~1.8.0" rc-util "^5.34.1" -rc-menu@~9.15.1: - version "9.15.1" - resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-9.15.1.tgz#d8b38ea534a7f596a8da063881519e7eaafca698" - integrity sha512-UKporqU6LPfHnpPmtP6hdEK4iO5Q+b7BRv/uRpxdIyDGplZy9jwUjsnpev5bs3PQKB0H0n34WAPDfjAfn3kAPA== +rc-menu@~9.16.0: + version "9.16.0" + resolved "https://registry.yarnpkg.com/rc-menu/-/rc-menu-9.16.0.tgz#53647f60f513bfa09bfc1accbd96a8df24900121" + integrity sha512-vAL0yqPkmXWk3+YKRkmIR8TYj3RVdEt3ptG2jCJXWNAvQbT0VJJdRyHZ7kG/l1JsZlB+VJq/VcYOo69VR4oD+w== dependencies: "@babel/runtime" "^7.10.1" "@rc-component/trigger" "^2.0.0" @@ -10884,10 +11293,10 @@ rc-pagination@~4.3.0: classnames "^2.3.2" rc-util "^5.38.0" -rc-picker@~4.6.15: - version "4.6.15" - resolved "https://registry.yarnpkg.com/rc-picker/-/rc-picker-4.6.15.tgz#1531c9c382a295e2d1f1f38440d6678b09cd0468" - integrity sha512-OWZ1yrMie+KN2uEUfYCfS4b2Vu6RC1FWwNI0s+qypsc3wRt7g+peuZKVIzXCTaJwyyZruo80+akPg2+GmyiJjw== +rc-picker@~4.8.0: + version "4.8.1" + resolved "https://registry.yarnpkg.com/rc-picker/-/rc-picker-4.8.1.tgz#105cfae323bf1db5e9f9e6fdc773ff3250e837de" + integrity sha512-lj9hXXMSkbjFUIhfQh8XH698ybxnoBOfq7pdM1FvfSyDwdFhdQa7dvsIYwo6Uz7Zp1wVkfw5rOJO3MpdWzoHsg== dependencies: "@babel/runtime" "^7.24.7" "@rc-component/trigger" "^2.0.0" @@ -10934,10 +11343,10 @@ rc-segmented@~2.5.0: rc-motion "^2.4.4" rc-util "^5.17.0" -rc-select@~14.15.0, rc-select@~14.15.2: - version "14.15.2" - resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-14.15.2.tgz#d85fcf3a708bdf837b003feeed653347b8980ad0" - integrity sha512-oNoXlaFmpqXYcQDzcPVLrEqS2J9c+/+oJuGrlXeVVX/gVgrbHa5YcyiRUXRydFjyuA7GP3elRuLF7Y3Tfwltlw== +rc-select@~14.16.2, rc-select@~14.16.3: + version "14.16.3" + resolved "https://registry.yarnpkg.com/rc-select/-/rc-select-14.16.3.tgz#cd5395ef724d693a8f782ddd7bcc6253c31c65a9" + integrity sha512-51+j6s3fJJJXB7E+B6W1hM4Tjzv1B/Decooz9ilgegDBt3ZAth1b/xMwYCTrT5BbG2e53XACQsyDib2+3Ro1fg== dependencies: "@babel/runtime" "^7.10.1" "@rc-component/trigger" "^2.1.1" @@ -10974,10 +11383,10 @@ rc-switch@~4.1.0: classnames "^2.2.1" rc-util "^5.30.0" -rc-table@~7.47.5: - version "7.47.5" - resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-7.47.5.tgz#3c530200baa82346c7e72fe9b1dbd47d4aa15838" - integrity sha512-fzq+V9j/atbPIcvs3emuclaEoXulwQpIiJA6/7ey52j8+9cJ4P8DGmp4YzfUVDrb3qhgedcVeD6eRgUrokwVEQ== +rc-table@~7.48.1: + version "7.48.1" + resolved "https://registry.yarnpkg.com/rc-table/-/rc-table-7.48.1.tgz#16ee3d82fa17284628d7883933b5d481238ea013" + integrity sha512-Z4mDKjWg+xz/Ezdw6ivWcbqRpaJ0QfCORRoRrlrw65KSGZLK8OcTdacH22/fyGb8L4It/0/9qcMm8VrVAk/WBw== dependencies: "@babel/runtime" "^7.10.1" "@rc-component/context" "^1.4.0" @@ -10986,15 +11395,15 @@ rc-table@~7.47.5: rc-util "^5.41.0" rc-virtual-list "^3.14.2" -rc-tabs@~15.3.0: - version "15.3.0" - resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-15.3.0.tgz#3fcc332fbb9307d5eb147e0404daca871fb92a89" - integrity sha512-lzE18r+zppT/jZWOAWS6ntdkDUKHOLJzqMi5UAij1LeKwOaQaupupAoI9Srn73GRzVpmGznkECMRrzkRusC40A== +rc-tabs@~15.4.0: + version "15.4.0" + resolved "https://registry.yarnpkg.com/rc-tabs/-/rc-tabs-15.4.0.tgz#a829cabcb33f93525b548010f5bbf91dee7ac1d6" + integrity sha512-llKuyiAVqmXm2z7OrmhX5cNb2ueZaL8ZyA2P4R+6/72NYYcbEgOXibwHiQCFY2RiN3swXl53SIABi2CumUS02g== dependencies: "@babel/runtime" "^7.11.2" classnames "2.x" rc-dropdown "~4.2.0" - rc-menu "~9.15.1" + rc-menu "~9.16.0" rc-motion "^2.6.2" rc-resize-observer "^1.0.0" rc-util "^5.34.1" @@ -11019,21 +11428,21 @@ rc-tooltip@~6.2.1: 
"@rc-component/trigger" "^2.0.0" classnames "^2.3.1" -rc-tree-select@~5.23.0: - version "5.23.0" - resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-5.23.0.tgz#e56da0923c7c11dea98d4e14bb76969283c94468" - integrity sha512-aQGi2tFSRw1WbXv0UVXPzHm09E0cSvUVZMLxQtMv3rnZZpNmdRXWrnd9QkLNlVH31F+X5rgghmdSFF3yZW0N9A== +rc-tree-select@~5.24.4: + version "5.24.4" + resolved "https://registry.yarnpkg.com/rc-tree-select/-/rc-tree-select-5.24.4.tgz#6789b8761daf53a8b91b75e9e6c4f720a7e8cc92" + integrity sha512-MzljkSkk7weKOcE853UtYlXB6uyUEzcEQhhpaCwE6jQPbmBUgGiRURuKWpYUnM/dXrwTTlCK969M6Pgjj35MLA== dependencies: - "@babel/runtime" "^7.10.1" + "@babel/runtime" "^7.25.7" classnames "2.x" - rc-select "~14.15.0" - rc-tree "~5.9.0" - rc-util "^5.16.1" + rc-select "~14.16.2" + rc-tree "~5.10.1" + rc-util "^5.43.0" -rc-tree@~5.9.0: - version "5.9.0" - resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-5.9.0.tgz#1835b2bef36cfeb4ec15d62e0319fc503aa485f1" - integrity sha512-CPrgOvm9d/9E+izTONKSngNzQdIEjMox2PBufWjS1wf7vxtvmCWzK1SlpHbRY6IaBfJIeZ+88RkcIevf729cRg== +rc-tree@~5.10.1: + version "5.10.1" + resolved "https://registry.yarnpkg.com/rc-tree/-/rc-tree-5.10.1.tgz#8807614c54aaa39edc05392f0f5982b609d95255" + integrity sha512-FPXb3tT/u39mgjr6JNlHaUTYfHkVGW56XaGDahDpEFLGsnPxGcVLNTjcqoQb/GNbSCycl7tD7EvIymwOTP0+Yw== dependencies: "@babel/runtime" "^7.10.1" classnames "2.x" @@ -11147,6 +11556,15 @@ react-markdown@^9.0.1: unist-util-visit "^5.0.0" vfile "^6.0.0" +react-reconciler@^0.26.2: + version "0.26.2" + resolved "https://registry.yarnpkg.com/react-reconciler/-/react-reconciler-0.26.2.tgz#bbad0e2d1309423f76cf3c3309ac6c96e05e9d91" + integrity sha512-nK6kgY28HwrMNwDnMui3dvm3rCFjZrcGiuwLc5COUipBK5hWHLOxMJhSnSomirqWwjPBJKV1QcbkI0VJr7Gl1Q== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler "^0.20.2" + react-refresh@^0.14.0: version "0.14.2" resolved "https://registry.yarnpkg.com/react-refresh/-/react-refresh-0.14.2.tgz#3833da01ce32da470f1f936b9d477da5c7028bf9" @@ -11630,6 +12048,14 @@ sax@^1.2.4: resolved "https://registry.yarnpkg.com/sax/-/sax-1.4.1.tgz#44cc8988377f126304d3b3fc1010c733b929ef0f" integrity sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg== +scheduler@^0.20.2: + version "0.20.2" + resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.20.2.tgz#4baee39436e34aa93b4874bddcbf0fe8b8b50e91" + integrity sha512-2eWfGgAqqWFGqtdMmcL5zCMK1U8KlXv8SQFGglL3CEtd0aDVDWgeF/YoCmvln55m5zSk3J/20hTaSBeSObsQDQ== + dependencies: + loose-envify "^1.1.0" + object-assign "^4.1.1" + scheduler@^0.23.2: version "0.23.2" resolved "https://registry.yarnpkg.com/scheduler/-/scheduler-0.23.2.tgz#414ba64a3b282892e944cf2108ecc078d115cdc3" @@ -11664,7 +12090,7 @@ schema-utils@^3.0.0, schema-utils@^3.1.1, schema-utils@^3.2.0: ajv "^6.12.5" ajv-keywords "^3.5.2" -schema-utils@^4.2.0: +schema-utils@^4.0.0, schema-utils@^4.2.0: version "4.2.0" resolved "https://registry.yarnpkg.com/schema-utils/-/schema-utils-4.2.0.tgz#70d7c93e153a273a805801882ebd3bff20d89c8b" integrity sha512-L0jRsrPpjdckP3oPug3/VxNKt2trR8TcabrM6FOAAlvC/9Phcmm+cuAgTlxBqdBR1WJx7Naj9WHw+aOmheSVbw== @@ -11805,6 +12231,11 @@ shallow-compare@^1.2.2: resolved "https://registry.yarnpkg.com/shallow-compare/-/shallow-compare-1.2.2.tgz#fa4794627bf455a47c4f56881d8a6132d581ffdb" integrity sha512-LUMFi+RppPlrHzbqmFnINTrazo0lPNwhcgzuAXVVcfy/mqPDrQmHAyz5bvV0gDAuRFrk804V0HpQ6u9sZ0tBeg== +shallowequal@1.1.0: + version "1.1.0" + resolved 
"https://registry.yarnpkg.com/shallowequal/-/shallowequal-1.1.0.tgz#188d521de95b9087404fd4dcb68b13df0ae4e7f8" + integrity sha512-y0m1JoUZSlPAjXVtPPW70aZWfIL/dSP7AFkRnniLCrK/8MDKog3TySTBmckD+RObVxH0v4Tox67+F14PdED2oQ== + sharp@^0.32.6: version "0.32.6" resolved "https://registry.yarnpkg.com/sharp/-/sharp-0.32.6.tgz#6ad30c0b7cd910df65d5f355f774aa4fce45732a" @@ -11968,7 +12399,7 @@ source-list-map@^2.0.0: resolved "https://registry.yarnpkg.com/source-list-map/-/source-list-map-2.0.1.tgz#3993bd873bfc48479cca9ea3a547835c7c154b34" integrity sha512-qnQ7gVMxGNxsiL4lEuJwe/To8UnK7fAnmbGEEH8RpLouuKbeEm0lhbQVFIrNSuB+G7tVrAlVsZgETT5nljf+Iw== -source-map-js@^1.2.1: +source-map-js@^1.2.0, source-map-js@^1.2.1: version "1.2.1" resolved "https://registry.yarnpkg.com/source-map-js/-/source-map-js-1.2.1.tgz#1ce5650fddd87abc099eda37dcff024c2667ae46" integrity sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA== @@ -12320,6 +12751,21 @@ style-to-object@^1.0.0: dependencies: inline-style-parser "0.2.4" +styled-components@^6.1.13: + version "6.1.13" + resolved "https://registry.yarnpkg.com/styled-components/-/styled-components-6.1.13.tgz#2d777750b773b31469bd79df754a32479e9f475e" + integrity sha512-M0+N2xSnAtwcVAQeFEsGWFFxXDftHUD7XrKla06QbpUMmbmtFBMMTcKWvFXtWxuD5qQkB8iU5gk6QASlx2ZRMw== + dependencies: + "@emotion/is-prop-valid" "1.2.2" + "@emotion/unitless" "0.8.1" + "@types/stylis" "4.2.5" + css-to-react-native "3.2.0" + csstype "3.1.3" + postcss "8.4.38" + shallowequal "1.1.0" + stylis "4.3.2" + tslib "2.6.2" + stylehacks@^5.1.1: version "5.1.1" resolved "https://registry.yarnpkg.com/stylehacks/-/stylehacks-5.1.1.tgz#7934a34eb59d7152149fa69d6e9e56f2fc34bcc9" @@ -12328,6 +12774,11 @@ stylehacks@^5.1.1: browserslist "^4.21.4" postcss-selector-parser "^6.0.4" +stylis@4.3.2: + version "4.3.2" + resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.3.2.tgz#8f76b70777dd53eb669c6f58c997bf0a9972e444" + integrity sha512-bhtUjWd/z6ltJiQwg0dUfxEJ+W+jdqQd8TbWLWyeIJHlnsqmGLRFFd8e5mA0AZi/zx90smXRlN66YMTcaSFifg== + stylis@^4.3.3: version "4.3.4" resolved "https://registry.yarnpkg.com/stylis/-/stylis-4.3.4.tgz#ca5c6c4a35c4784e4e93a2a24dc4e9fa075250a4" @@ -12412,6 +12863,11 @@ system-architecture@^0.1.0: resolved "https://registry.yarnpkg.com/system-architecture/-/system-architecture-0.1.0.tgz#71012b3ac141427d97c67c56bc7921af6bff122d" integrity sha512-ulAk51I9UVUyJgxlv9M6lFot2WP3e7t8Kz9+IS6D4rVba1tR9kON+Ey69f+1R4Q8cd45Lod6a4IcJIxnzGc/zA== +tabbable@^6.0.0: + version "6.2.0" + resolved "https://registry.yarnpkg.com/tabbable/-/tabbable-6.2.0.tgz#732fb62bc0175cfcec257330be187dcfba1f3b97" + integrity sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew== + table@^6.0.9: version "6.8.2" resolved "https://registry.yarnpkg.com/table/-/table-6.8.2.tgz#c5504ccf201213fa227248bdc8c5569716ac6c58" @@ -12673,12 +13129,17 @@ tsconfig-paths@^3.15.0: minimist "^1.2.6" strip-bom "^3.0.0" +tslib@2.6.2: + version "2.6.2" + resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.6.2.tgz#703ac29425e7b37cd6fd456e92404d46d1f3e4ae" + integrity sha512-AEYxH93jGFPn/a2iVAwW87VuUIkR1FVUKB77NwMF7nBTDkDrrT/Hpt/IrCJ0QXhW27jTBDcf5ZY7w6RiqTMw2Q== + tslib@^1.10.0, tslib@^1.8.1, tslib@^1.9.0: version "1.14.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-1.14.1.tgz#cf2d38bdc34a134bcaf1091c41f6619e2f672d00" integrity sha512-Xni35NKzjgMrwevysHTCArtLDpPvye8zV/0E4EyYn43P7/7qvQwPh9BGkHewbMulVntbigmcT7rdX3BNo9wRJg== -tslib@^2.0.3, tslib@^2.1.0, tslib@^2.3.1, tslib@^2.4.0, 
tslib@^2.5.3: +tslib@^2.0.0, tslib@^2.0.3, tslib@^2.1.0, tslib@^2.3.1, tslib@^2.4.0, tslib@^2.5.0, tslib@^2.5.3, tslib@^2.8.0: version "2.8.1" resolved "https://registry.yarnpkg.com/tslib/-/tslib-2.8.1.tgz#612efe4ed235d567e8aba5f2a5fab70280ade83f" integrity sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w== @@ -12838,7 +13299,7 @@ unc-path-regex@^0.1.2: resolved "https://registry.yarnpkg.com/unc-path-regex/-/unc-path-regex-0.1.2.tgz#e73dd3d7b0d7c5ed86fbac6b0ae7d8c6a69d50fa" integrity sha512-eXL4nmJT7oCpkZsHZUOJo8hcX3GbsiDOa0Qu9F646fi8dT3XuSVopVqAcEiVzSKKH7UoDti23wNX3qGFxcW5Qg== -undici-types@~6.19.2, undici-types@~6.19.8: +undici-types@~6.19.8: version "6.19.8" resolved "https://registry.yarnpkg.com/undici-types/-/undici-types-6.19.8.tgz#35111c9d1437ab83a7cdc0abae2f26d88eda0a02" integrity sha512-ve2KP6f/JnbPBFyobGHuerC9g1FYGn/F8n1LWTwNxCEzd6IfqTwUQcNXgEtmmQ6DlRrC1hrSrBnCZPokRrDHjw== @@ -13207,17 +13668,16 @@ webidl-conversions@^3.0.0: resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== -webpack-dev-middleware@^4.3.0: - version "4.3.0" - resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-4.3.0.tgz#179cc40795882cae510b1aa7f3710cbe93c9333e" - integrity sha512-PjwyVY95/bhBh6VUqt6z4THplYcsvQ8YNNBTBM873xLVmw8FLeALn0qurHbs9EmcfhzQis/eoqypSnZeuUz26w== +webpack-dev-middleware@^5.3.4: + version "5.3.4" + resolved "https://registry.yarnpkg.com/webpack-dev-middleware/-/webpack-dev-middleware-5.3.4.tgz#eb7b39281cbce10e104eb2b8bf2b63fce49a3517" + integrity sha512-BVdTqhhs+0IfoeAf7EoH5WE+exCmqGerHfDM0IL096Px60Tq2Mn9MAbnaGUe6HiMa41KMCYF19gyzZmBcq/o4Q== dependencies: - colorette "^1.2.2" - mem "^8.1.1" - memfs "^3.2.2" - mime-types "^2.1.30" + colorette "^2.0.10" + memfs "^3.4.3" + mime-types "^2.1.31" range-parser "^1.2.1" - schema-utils "^3.0.0" + schema-utils "^4.0.0" webpack-merge@^5.9.0: version "5.10.0" diff --git a/python/packages/autogen-studio/notebooks/tutorial.ipynb b/python/packages/autogen-studio/notebooks/tutorial.ipynb index 4825d6580f51..8e749f743839 100644 --- a/python/packages/autogen-studio/notebooks/tutorial.ipynb +++ b/python/packages/autogen-studio/notebooks/tutorial.ipynb @@ -23,7 +23,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_1LrIS8t1wuK3KzDPP4DuwuiQ', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_1LrIS8t1wuK3KzDPP4DuwuiQ')]), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=97, completion_tokens=14), content='The current weather in New York is 73 degrees and sunny.'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=9), content='What is the weather in Los Angeles?')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=2.2458291053771973\n" + "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the 
weather in New York?'), ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_x8C5nib1PJkMZGQ6zrNUlfa0', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_x8C5nib1PJkMZGQ6zrNUlfa0')]), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=97, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=13), content='Would you like to know the weather in any other city?')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=1.9984567165374756\n" ] } ], @@ -45,11 +45,11 @@ "output_type": "stream", "text": [ "source='user' models_usage=None content='What is the weather in New York?'\n", - "source='writing_agent' models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15) content=[FunctionCall(id='call_Otg8Exxp7uAcsZnRyTWRYGkb', arguments='{\"city\":\"New York\"}', name='get_weather')]\n", - "source='writing_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_Otg8Exxp7uAcsZnRyTWRYGkb')]\n", + "source='writing_agent' models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15) content=[FunctionCall(id='call_Gwnfsa8ndnOsXTvRECTr92hr', arguments='{\"city\":\"New York\"}', name='get_weather')]\n", + "source='writing_agent' models_usage=None content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_Gwnfsa8ndnOsXTvRECTr92hr')]\n", "source='writing_agent' models_usage=RequestUsage(prompt_tokens=97, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.'\n", - "source='writing_agent' models_usage=RequestUsage(prompt_tokens=123, completion_tokens=16) content='Would you like to know anything else about the weather or any other information?'\n", - "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_Otg8Exxp7uAcsZnRyTWRYGkb', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_Otg8Exxp7uAcsZnRyTWRYGkb')]), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=97, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=16), content='Would you like to know anything else about the weather or any other information?')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=1.9408440589904785\n" + "source='writing_agent' models_usage=RequestUsage(prompt_tokens=123, completion_tokens=14) content='The weather in New York is currently 73 degrees and sunny.'\n", + "task_result=TaskResult(messages=[TextMessage(source='user', models_usage=None, content='What is the weather in New York?'), 
ToolCallMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=65, completion_tokens=15), content=[FunctionCall(id='call_Gwnfsa8ndnOsXTvRECTr92hr', arguments='{\"city\":\"New York\"}', name='get_weather')]), ToolCallResultMessage(source='writing_agent', models_usage=None, content=[FunctionExecutionResult(content='The weather in New York is 73 degrees and Sunny.', call_id='call_Gwnfsa8ndnOsXTvRECTr92hr')]), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=97, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.'), TextMessage(source='writing_agent', models_usage=RequestUsage(prompt_tokens=123, completion_tokens=14), content='The weather in New York is currently 73 degrees and sunny.')], stop_reason='Maximum number of messages 5 reached, current message count: 5') usage='' duration=2.363379955291748\n" ] } ], @@ -79,9 +79,9 @@ "text": [ "INFO [alembic.runtime.migration] Context impl SQLiteImpl.\n", "INFO [alembic.runtime.migration] Will assume non-transactional DDL.\n", - "\u001b[32m2024-11-09 11:55:02.090\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.schema_manager\u001b[0m:\u001b[36mupgrade_schema\u001b[0m:\u001b[36m390\u001b[0m - \u001b[1mSchema upgraded successfully to head\u001b[0m\n", - "\u001b[32m2024-11-09 11:55:02.090\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.db_manager\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m34\u001b[0m - \u001b[1mDatabase schema was upgraded automatically\u001b[0m\n", - "\u001b[32m2024-11-09 11:55:02.092\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.db_manager\u001b[0m:\u001b[36mcreate_db_and_tables\u001b[0m:\u001b[36m108\u001b[0m - \u001b[1mDatabase tables created successfully\u001b[0m\n" + "\u001b[32m2024-11-14 09:06:25.242\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.schema_manager\u001b[0m:\u001b[36mupgrade_schema\u001b[0m:\u001b[36m390\u001b[0m - \u001b[1mSchema upgraded successfully to head\u001b[0m\n", + "\u001b[32m2024-11-14 09:06:25.243\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.db_manager\u001b[0m:\u001b[36m__init__\u001b[0m:\u001b[36m34\u001b[0m - \u001b[1mDatabase schema was upgraded automatically\u001b[0m\n", + "\u001b[32m2024-11-14 09:06:25.244\u001b[0m | \u001b[1mINFO \u001b[0m | \u001b[36mautogenstudio.database.db_manager\u001b[0m:\u001b[36mcreate_db_and_tables\u001b[0m:\u001b[36m108\u001b[0m - \u001b[1mDatabase tables created successfully\u001b[0m\n" ] } ], @@ -152,7 +152,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "43 teams in database\n" + "53 teams in database\n" ] } ], @@ -176,24 +176,17 @@ "metadata": {}, "outputs": [], "source": [ - "from autogenstudio.database import ConfigurationManager \n", + "from autogenstudio.database import ConfigurationManager \n", + "\n", "config_manager = ConfigurationManager(dbmanager)\n", " " ] }, { "cell_type": "code", - "execution_count": 7, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "message=\"team with team_type='TeamTypes.ROUND_ROBIN' and name='weather_team' already exists\" status=True data={'id': 42}\n" - ] - } - ], + "outputs": [], "source": [ "result = await config_manager.import_component(\"team.json\", user_id=\"user_id\", check_exists=True)\n", "print(result)" @@ -208,7 +201,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "message='Directory import complete' status=True 
data=[{'component': 'team', 'status': True, 'message': 'Team Created Successfully', 'id': 54}]\n" ] } ], @@ -226,7 +219,7 @@ "name": "stdout", "output_type": "stream", "text": [ - "44 teams in database\n" + "54 teams in database\n" ] } ], @@ -235,9 +228,16 @@ "print(len(all_teams.data), \"teams in database\")" ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Sample AgentChat Example (Python)" + ] + }, { "cell_type": "code", - "execution_count": 14, + "execution_count": 10, "metadata": {}, "outputs": [], "source": [ @@ -250,28 +250,28 @@ " \"planner_agent\",\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", " description=\"A helpful assistant that can plan trips.\",\n", - " system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request.\",\n", + " system_message=\"You are a helpful assistant that can suggest a travel plan for a user based on their request. Respond with a single sentence.\",\n", ")\n", "\n", "local_agent = AssistantAgent(\n", " \"local_agent\",\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", " description=\"A local assistant that can suggest local activities or places to visit.\",\n", - " system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided.\",\n", + " system_message=\"You are a helpful assistant that can suggest authentic and interesting local activities or places to visit for a user and can utilize any context information provided. Respond with a single sentence.\",\n", ")\n", "\n", "language_agent = AssistantAgent(\n", " \"language_agent\",\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", " description=\"A helpful assistant that can provide language tips for a given destination.\",\n", - " system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale.\",\n", + " system_message=\"You are a helpful assistant that can review travel plans, providing feedback on important/critical tips about how best to address language or communication challenges for the given destination. If the plan already includes language tips, you can mention that the plan is satisfactory, with rationale. Respond with a single sentence.\",\n", ")\n", "\n", "travel_summary_agent = AssistantAgent(\n", " \"travel_summary_agent\",\n", " model_client=OpenAIChatCompletionClient(model=\"gpt-4\"),\n", " description=\"A helpful assistant that can summarize the travel plan.\",\n", - " system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed tfinal travel plan. You must ensure th b at the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE.\",\n", + " system_message=\"You are a helpful assistant that can take in all of the suggestions and advice from the other agents and provide a detailed final travel plan. You must ensure that the final plan is integrated and complete. YOUR FINAL RESPONSE MUST BE THE COMPLETE PLAN. When the plan is complete and all perspectives are integrated, you can respond with TERMINATE. Respond with a single sentence.\",\n", ")\n", "\n", "termination = TextMentionTermination(\"TERMINATE\")\n", @@ -282,7 +282,7 @@ },
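The hunk above ends at the cell that defines the four agents and the `TextMentionTermination` condition; the line that actually assembles and runs the team falls outside the diff context. As a hedged sketch of what that elided step might look like on the `0.4.0.dev7`-era AgentChat API: the module paths (`autogen_agentchat.teams`, `autogen_agentchat.task`) and the placement of `termination_condition` on `run()` are assumptions inferred from the identifiers this notebook already uses, not confirmed by the diff itself.

```python
# Sketch only: assumes the 0.4.0.dev7-era AgentChat API. The module paths and
# the termination_condition keyword are inferred, not confirmed by this diff.
from autogen_agentchat.task import TextMentionTermination
from autogen_agentchat.teams import RoundRobinGroupChat

termination = TextMentionTermination("TERMINATE")

# The four agents defined in the cell above take turns in round-robin order.
team = RoundRobinGroupChat([planner_agent, local_agent, language_agent, travel_summary_agent])

# Top-level await is fine inside the notebook; wrap in asyncio.run(...) in a script.
result = await team.run(task="Plan a 3 day trip to Nepal.", termination_condition=termination)
print(result.stop_reason)
```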
{ "cell_type": "code", - "execution_count": 15, + "execution_count": 11, "metadata": {}, "outputs": [ { "output_type": "stream", "text": [ "source='user' models_usage=None content='Plan a 3 day trip to Nepal.'\n", - "source='planner_agent' models_usage=RequestUsage(prompt_tokens=40, completion_tokens=394) content=\"Sure, here is a simple 3-day itinerary for a trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historical and cultural hub - Kathmandu Durbar Square. This square is a complex of palaces, courtyards and temples. You can enjoy vibrant local life and activities around the square.\\n\\n- Evening: You can explore lots of shops, cafes and restaurants in the Thamel. Experience local food and artisanal delicacies.\\n\\n**Day 2: Visit Bhaktapur and Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short ride to Bhaktapur city. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple and Bhaktapur National Art Gallery.\\n\\n- Afternoon/Evening: Travel to Nagarkot. If the weather is clear, this place offers great panoramic views of the Himalayan range including Mount Everest during sunset. It's also quite known for hiking so make sure to check out the trails if you're into that.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. Visit the Patan Museum, Golden Temple and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting a few more sites like Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Check out any last minute souvenirs from local markets and try some local Nepalese cuisine. \\n\\nPlease ensure to check travel advisories and any necessary travel documentations before your trip. It's also important to make arrangement for proper accommodation and transportation in advance of your trip.\\n\"\n", - "source='local_agent' models_usage=RequestUsage(prompt_tokens=451, completion_tokens=394) content=\"Sure, here is a simple 3-day itinerary for a trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historical and cultural hub - Kathmandu Durbar Square. This square is a complex of palaces, courtyards and temples. You can enjoy vibrant local life and activities around the square.\\n\\n- Evening: You can explore lots of shops, cafes and restaurants in the Thamel. Experience local food and artisanal delicacies.\\n\\n**Day 2: Visit Bhaktapur and Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short ride to Bhaktapur city. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple and Bhaktapur National Art Gallery.\\n\\n- Afternoon/Evening: Travel to Nagarkot. If the weather is clear, this place offers great panoramic views of the Himalayan range including Mount Everest during sunset. 
It's also quite known for hiking so make sure to check out the trails if you're into that.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. Visit the Patan Museum, Golden Temple and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting a few more sites like Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Check out any last minute souvenirs from local markets and try some local Nepalese cuisine. \\n\\nPlease ensure to check travel advisories and any necessary travel documentations before your trip. It's also important to make arrangement for proper accommodation and transportation in advance of your trip.\"\n", - "source='language_agent' models_usage=RequestUsage(prompt_tokens=876, completion_tokens=246) content=\"Your travel itinerary seems to be well designed and comprehensive. However, communication and language fluency can significantly impact your travel experience. Here are some essential tips on this topic:\\n\\n1. The official language of Nepal is Nepali, but English is understood by a significant portion of the population in urban areas and tourist spots. Nevertheless, it can be helpful if you learn some basic Nepali phrases such as greetings, thanking, apologizing, and expressing urgency.\\n\\n2. Download a translator app on your phone like Google Translate in case you encounter someone who only speaks Nepali or for translating signs and menus.\\n\\n3. Be mindful of cultural sensitivities when communicating. Nepalese people are generally very friendly and hospitable, but it's important to respect their traditions and culture. Avoid criticism and negative comments about their culture and religion.\\n\\n4. Guide books, travel blogs, and forums can offer insights and useful phrases in Nepali that can enhance your interaction with locals.\\n\\n5. If you prefer, you may hire a local tour guide who can communicate in English to assist you, especially in rural areas.\\n\\nFeel free to modify your plan as per your preferences and ensure that you are well-prepared to handle communication challenges to have a seamless experience.\"\n", - "source='travel_summary_agent' models_usage=RequestUsage(prompt_tokens=1146, completion_tokens=484) content=\"Here is the final integrated 3-day travel plan for your trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in the Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historic Kathmandu Durbar Square, a complex of palaces, courtyards and temples where you can enjoy vibrant local life and activities.\\n\\n- Evening: Wander around the Thamel district, checking out the array of shops, cafes and restaurants. Try local food and artisanal delicacies.\\n\\n**Day 2: Immerse in Bhaktapur and Relax in Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short trip to Bhaktapur, a culturally rich city known for its preserved palace courtyards and old city center. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple, and Bhaktapur National Art Gallery.\\n\\n- Late Afternoon/Evening: Travel to Nagarkot, known for its stunning Himalayan views, including Mount Everest during clear weather. Consider going hiking if you're into outdoor activities.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. 
Visit the Patan Museum, Golden Temple, and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting the Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Explore local markets for last minute souvenirs and sample more of the delicious local Nepalese cuisine. \\n\\n_Note on communication:_ Although English is understood in urban areas and major tourist spots, learning a few basic Nepali phrases can help you during your travels. Be mindful of cultural sensitivities, and consider downloading a translation app.\\n\\nIt's also important to secure accommodations in advance, comply with travel document requirements and check travel advisories before your trip. If you want more seamless communication, especially in rural areas, consider hiring a local English-speaking tour guide. \\n\\nYou are now all set for a memorable journey to Nepal. Use this plan as a guideline and feel free to modify it to better suit your preferences and schedule. Safe travels!\\n\\nTERMINATE\"\n", - "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Plan a 3 day trip to Nepal.'), TextMessage(source='planner_agent', models_usage=RequestUsage(prompt_tokens=40, completion_tokens=394), content=\"Sure, here is a simple 3-day itinerary for a trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historical and cultural hub - Kathmandu Durbar Square. This square is a complex of palaces, courtyards and temples. You can enjoy vibrant local life and activities around the square.\\n\\n- Evening: You can explore lots of shops, cafes and restaurants in the Thamel. Experience local food and artisanal delicacies.\\n\\n**Day 2: Visit Bhaktapur and Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short ride to Bhaktapur city. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple and Bhaktapur National Art Gallery.\\n\\n- Afternoon/Evening: Travel to Nagarkot. If the weather is clear, this place offers great panoramic views of the Himalayan range including Mount Everest during sunset. It's also quite known for hiking so make sure to check out the trails if you're into that.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. Visit the Patan Museum, Golden Temple and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting a few more sites like Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Check out any last minute souvenirs from local markets and try some local Nepalese cuisine. \\n\\nPlease ensure to check travel advisories and any necessary travel documentations before your trip. It's also important to make arrangement for proper accommodation and transportation in advance of your trip.\\n\"), TextMessage(source='local_agent', models_usage=RequestUsage(prompt_tokens=451, completion_tokens=394), content=\"Sure, here is a simple 3-day itinerary for a trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historical and cultural hub - Kathmandu Durbar Square. This square is a complex of palaces, courtyards and temples. 
You can enjoy vibrant local life and activities around the square.\\n\\n- Evening: You can explore lots of shops, cafes and restaurants in the Thamel. Experience local food and artisanal delicacies.\\n\\n**Day 2: Visit Bhaktapur and Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short ride to Bhaktapur city. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple and Bhaktapur National Art Gallery.\\n\\n- Afternoon/Evening: Travel to Nagarkot. If the weather is clear, this place offers great panoramic views of the Himalayan range including Mount Everest during sunset. It's also quite known for hiking so make sure to check out the trails if you're into that.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. Visit the Patan Museum, Golden Temple and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting a few more sites like Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Check out any last minute souvenirs from local markets and try some local Nepalese cuisine. \\n\\nPlease ensure to check travel advisories and any necessary travel documentations before your trip. It's also important to make arrangement for proper accommodation and transportation in advance of your trip.\"), TextMessage(source='language_agent', models_usage=RequestUsage(prompt_tokens=876, completion_tokens=246), content=\"Your travel itinerary seems to be well designed and comprehensive. However, communication and language fluency can significantly impact your travel experience. Here are some essential tips on this topic:\\n\\n1. The official language of Nepal is Nepali, but English is understood by a significant portion of the population in urban areas and tourist spots. Nevertheless, it can be helpful if you learn some basic Nepali phrases such as greetings, thanking, apologizing, and expressing urgency.\\n\\n2. Download a translator app on your phone like Google Translate in case you encounter someone who only speaks Nepali or for translating signs and menus.\\n\\n3. Be mindful of cultural sensitivities when communicating. Nepalese people are generally very friendly and hospitable, but it's important to respect their traditions and culture. Avoid criticism and negative comments about their culture and religion.\\n\\n4. Guide books, travel blogs, and forums can offer insights and useful phrases in Nepali that can enhance your interaction with locals.\\n\\n5. If you prefer, you may hire a local tour guide who can communicate in English to assist you, especially in rural areas.\\n\\nFeel free to modify your plan as per your preferences and ensure that you are well-prepared to handle communication challenges to have a seamless experience.\"), TextMessage(source='travel_summary_agent', models_usage=RequestUsage(prompt_tokens=1146, completion_tokens=484), content=\"Here is the final integrated 3-day travel plan for your trip to Nepal:\\n\\n**Day 1: Explore Kathmandu:**\\n\\n- Morning: Arrival in Tribhuvan International Airport. Take an airport taxi to your hotel in the Thamel area – the touristic heart of Kathmandu. Freshen up after your journey.\\n\\n- Afternoon: Visit the historic Kathmandu Durbar Square, a complex of palaces, courtyards and temples where you can enjoy vibrant local life and activities.\\n\\n- Evening: Wander around the Thamel district, checking out the array of shops, cafes and restaurants. 
Try local food and artisanal delicacies.\\n\\n**Day 2: Immerse in Bhaktapur and Relax in Nagarkot:**\\n\\n- Morning/Early Afternoon: Take a short trip to Bhaktapur, a culturally rich city known for its preserved palace courtyards and old city center. Visit sites like Bhaktapur Durbar Square, Nyatapola Temple, and Bhaktapur National Art Gallery.\\n\\n- Late Afternoon/Evening: Travel to Nagarkot, known for its stunning Himalayan views, including Mount Everest during clear weather. Consider going hiking if you're into outdoor activities.\\n\\n**Day 3: Experience Patan and Return to Kathmandu:**\\n\\n- Morning: Travel back towards Patan, another royal city of Nepal. Visit the Patan Museum, Golden Temple, and the Patan Durbar Square.\\n\\n- Afternoon: Return to Kathmandu and wrap up your trip by visiting the Swayambhunath Stupa and Pashupatinath Temple.\\n\\n- Evening: Explore local markets for last minute souvenirs and sample more of the delicious local Nepalese cuisine. \\n\\n_Note on communication:_ Although English is understood in urban areas and major tourist spots, learning a few basic Nepali phrases can help you during your travels. Be mindful of cultural sensitivities, and consider downloading a translation app.\\n\\nIt's also important to secure accommodations in advance, comply with travel document requirements and check travel advisories before your trip. If you want more seamless communication, especially in rural areas, consider hiring a local English-speaking tour guide. \\n\\nYou are now all set for a memorable journey to Nepal. Use this plan as a guideline and feel free to modify it to better suit your preferences and schedule. Safe travels!\\n\\nTERMINATE\")], stop_reason=\"Text 'TERMINATE' mentioned\")\n" + "source='planner_agent' models_usage=RequestUsage(prompt_tokens=45, completion_tokens=54) content=\"Consider starting your 3-day trip to Nepal with a cultural tour in Kathmandu Valley, followed by an exploration of Pokhara's natural beauty on the second day, and finally, indulge in a thrilling safari at Chitwan National Park on the third day.\"\n", + "source='local_agent' models_usage=RequestUsage(prompt_tokens=116, completion_tokens=54) content=\"Consider starting your 3-day trip to Nepal with a cultural tour in Kathmandu Valley, followed by an exploration of Pokhara's natural beauty on the second day, and finally, indulge in a thrilling safari at Chitwan National Park on the third day.\"\n", + "source='language_agent' models_usage=RequestUsage(prompt_tokens=201, completion_tokens=45) content=\"Your travel plan lacks a mention of dealing with potential language barriers; it might be useful to learn some phrases in Nepali, as it's the official language of Nepal, or have a local translation app handy during your trip.\"\n", + "source='travel_summary_agent' models_usage=RequestUsage(prompt_tokens=270, completion_tokens=237) content='Day 1: Start your adventure in the capital city, Kathmandu. Take a guided tour of Kathmandu Valley to explore its UNESCO World Heritage Sites, such as the Durbar Squares, Swayambhunath Stupa, and Boudhanath Stupa. Engage with the locals and sample some traditional Nepalese cuisine.\\n\\nDay 2: Proceed to Pokhara, known for its stunning natural beauty. Visit the iconic Phewa Lake and enjoy a boat ride, then trek to the Peace Pagoda for a panoramic view of the city. Round off the day with a visit to the fascinating Pokhara Mountain Museum.\\n\\nDay 3: Travel to Chitwan National Park for a memorable safari. 
Explore the diverse wildlife and lush vegetation that make the park a UNESCO World Heritage site. Be on the lookout for rhinos, Bengal tigers, and a multitude of bird species.\\n\\nNote: Communication is key to enjoying your trip. The official language of Nepal is Nepali. It can be helpful to learn a few basic phrases or carry a translation app to help you interact with the local people and enrich your cultural experience.\\n\\nTERMINATE.'\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='Plan a 3 day trip to Nepal.'), TextMessage(source='planner_agent', models_usage=RequestUsage(prompt_tokens=45, completion_tokens=54), content=\"Consider starting your 3-day trip to Nepal with a cultural tour in Kathmandu Valley, followed by an exploration of Pokhara's natural beauty on the second day, and finally, indulge in a thrilling safari at Chitwan National Park on the third day.\"), TextMessage(source='local_agent', models_usage=RequestUsage(prompt_tokens=116, completion_tokens=54), content=\"Consider starting your 3-day trip to Nepal with a cultural tour in Kathmandu Valley, followed by an exploration of Pokhara's natural beauty on the second day, and finally, indulge in a thrilling safari at Chitwan National Park on the third day.\"), TextMessage(source='language_agent', models_usage=RequestUsage(prompt_tokens=201, completion_tokens=45), content=\"Your travel plan lacks a mention of dealing with potential language barriers; it might be useful to learn some phrases in Nepali, as it's the official language of Nepal, or have a local translation app handy during your trip.\"), TextMessage(source='travel_summary_agent', models_usage=RequestUsage(prompt_tokens=270, completion_tokens=237), content='Day 1: Start your adventure in the capital city, Kathmandu. Take a guided tour of Kathmandu Valley to explore its UNESCO World Heritage Sites, such as the Durbar Squares, Swayambhunath Stupa, and Boudhanath Stupa. Engage with the locals and sample some traditional Nepalese cuisine.\\n\\nDay 2: Proceed to Pokhara, known for its stunning natural beauty. Visit the iconic Phewa Lake and enjoy a boat ride, then trek to the Peace Pagoda for a panoramic view of the city. Round off the day with a visit to the fascinating Pokhara Mountain Museum.\\n\\nDay 3: Travel to Chitwan National Park for a memorable safari. Explore the diverse wildlife and lush vegetation that make the park a UNESCO World Heritage site. Be on the lookout for rhinos, Bengal tigers, and a multitude of bird species.\\n\\nNote: Communication is key to enjoying your trip. The official language of Nepal is Nepali. 
It can be helpful to learn a few basic phrases or carry a translation app to help you interact with the local people and enrich your cultural experience.\\n\\nTERMINATE.')], stop_reason=\"Text 'TERMINATE' mentioned\")\n" ] } ], @@ -305,12 +305,52 @@ " print(response) " ] }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Human in the Loop with a UserProxy Agent\n", + "\n", + "AutoGen Studio provides a custom agent that allows a human to interact as part of the agent team.\n", + "\n" + ] + }, { "cell_type": "code", - "execution_count": null, + "execution_count": 14, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "from autogenstudio.components import UserProxyAgent \n", + "\n", + "def input_func(prompt: str) -> str: \n", + " return \"Hello World there\" + str(prompt)\n", + "user_agent = UserProxyAgent(name=\"user_agent\", description=\"a human user\", input_func=input_func)" + ] + }, + { + "cell_type": "code", + "execution_count": 15, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "source='user' models_usage=None content='hello there'\n", + "source='user_agent' models_usage=None content='Hello World thereEnter your response: '\n", + "TaskResult(messages=[TextMessage(source='user', models_usage=None, content='hello there'), TextMessage(source='user_agent', models_usage=None, content='Hello World thereEnter your response: ')], stop_reason=None)\n" + ] + } + ], + "source": [ + "from autogen_core.base import CancellationToken \n", + "cancellation_token = CancellationToken()\n", + "stream = user_agent.run_stream(task=\"hello there\", cancellation_token=cancellation_token)\n", + "\n", + "async for response in stream:\n", + " print(response)" + ] } ], "metadata": {
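The `input_func` in the cells above is a stub that returns a canned string, which is why the transcript reads `Hello World thereEnter your response: `. A minimal sketch of a console-backed variant, assuming the same `(prompt: str) -> str` callable shape that `UserProxyAgent` is given above (`console_input_func` and `human_agent` are illustrative names, not part of this change):

```python
# Sketch only: reuses the UserProxyAgent constructor arguments shown in the
# notebook; the console-reading input function is an illustrative assumption.
from autogenstudio.components import UserProxyAgent

def console_input_func(prompt: str) -> str:
    # Block until an actual human types a reply in the terminal.
    return input(prompt)

human_agent = UserProxyAgent(
    name="human_agent",
    description="a human user typing at the console",
    input_func=console_input_func,
)
```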
diff --git a/python/packages/autogen-studio/pyproject.toml b/python/packages/autogen-studio/pyproject.toml index b6d7e2a4fa53..fc3acee48e0f 100644 --- a/python/packages/autogen-studio/pyproject.toml +++ b/python/packages/autogen-studio/pyproject.toml @@ -20,9 +20,10 @@ classifiers = [ dependencies = [ "pydantic", + "pydantic-settings", "fastapi", "typer", - "uvicorn", + "uvicorn", "python-dotenv", "websockets", "numpy < 2.0.0", @@ -30,6 +31,10 @@ dependencies = [ "psycopg", "alembic", "loguru", + "pyyaml", + "autogen-core==0.4.0.dev7", + "autogen-agentchat==0.4.0.dev7", + "autogen-ext==0.4.0.dev7" ] optional-dependencies = {web = ["fastapi", "uvicorn"], database = ["psycopg"]} diff --git a/python/pyproject.toml b/python/pyproject.toml index 83535c6f5402..e9b9753cfca9 100644 --- a/python/pyproject.toml +++ b/python/pyproject.toml @@ -3,8 +3,8 @@ members = ["packages/*"] [tool.uv] dev-dependencies = [ - "pyright==1.1.378", - "mypy==1.10.0", + "pyright==1.1.389", + "mypy==1.13.0", "ruff==0.4.8", "pytest", "pytest-asyncio", @@ -79,3 +79,5 @@ test = "python run_task_in_pkgs_if_exist.py test" check = ["fmt", "lint", "pyright", "mypy", "test"] gen-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/src/autogen_core/application/protos --grpc_python_out=./packages/autogen-core/src/autogen_core/application/protos --mypy_out=./packages/autogen-core/src/autogen_core/application/protos --mypy_grpc_out=./packages/autogen-core/src/autogen_core/application/protos --proto_path ../protos/ agent_worker.proto --proto_path ../protos/ cloudevent.proto + +gen-test-proto = "python -m grpc_tools.protoc --python_out=./packages/autogen-core/tests/protos --grpc_python_out=./packages/autogen-core/tests/protos --mypy_out=./packages/autogen-core/tests/protos --mypy_grpc_out=./packages/autogen-core/tests/protos --proto_path ./packages/autogen-core/tests/protos serialization_test.proto" \ No newline at end of file diff --git a/python/uv.lock b/python/uv.lock index 266b1122a5d7..4602559d0e01 100644 --- a/python/uv.lock +++ b/python/uv.lock @@ -22,12 +22,12 @@ members = [ requirements = [ { name = "cookiecutter" }, { name = "grpcio-tools", specifier = "~=1.62.0" }, - { name = "mypy", specifier = "==1.10.0" }, + { name = "mypy", specifier = "==1.13.0" }, { name = "mypy-protobuf" }, { name = "packaging" }, { name = "poethepoet" }, { name = "polars" }, - { name = "pyright", specifier = "==1.1.378" }, + { name = "pyright", specifier = "==1.1.389" }, { name = "pytest" }, { name = "pytest-asyncio" }, { name = "pytest-mock" }, @@ -315,7 +315,7 @@ wheels = [ [[package]] name = "autogen-agentchat" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-agentchat" } dependencies = [ { name = "autogen-core" }, @@ -329,12 +329,11 @@ dev = [] [[package]] name = "autogen-core" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-core" } dependencies = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio" }, { name = "jsonref" }, { name = "openai" }, { name = "opentelemetry-api" }, @@ -345,6 +344,11 @@ dependencies = [ { name = "typing-extensions" }, ] +[package.optional-dependencies] +grpc = [ + { name = "grpcio" }, +] + [package.dev-dependencies] dev = [ { name = "aiofiles" }, @@ -367,6 +371,7 @@ dev = [ { name = "pip" }, { name = "polars" }, { name = "pydata-sphinx-theme" }, + { name = "pygments" }, { name = "python-dotenv" }, { name = "requests" }, { name = "sphinx" }, @@ -390,7 +395,7 @@ dev = [ requires-dist = [ { name = "aiohttp" }, { name = "asyncio-atexit" }, - { name = "grpcio", specifier = "~=1.62.0" }, + { name = "grpcio", marker = "extra == 'grpc'", specifier = "~=1.62.0" }, { name = "jsonref", specifier = "~=1.1.0" }, { name = "openai", specifier = ">=1.3" }, { name = "opentelemetry-api", specifier = "~=1.27.0" }, @@ -423,6 +428,7 @@ dev = [ { name = "pip" }, { name = "polars" }, { name = "pydata-sphinx-theme", specifier = "==0.15.4" }, + { name = "pygments" }, { name = "python-dotenv" }, { name = "requests" }, { name = "sphinx" }, @@ -444,7 +450,7 @@ dev = [ [[package]] name = "autogen-ext" -version = "0.4.0.dev6" +version = "0.4.0.dev7" source = { editable = "packages/autogen-ext" } dependencies = [ { name = "autogen-core" }, @@ -567,16 +573,21 @@ dev = [ [[package]] name = "autogenstudio" -version = "0.1.6" +version = "0.4.0.dev37" source = { editable = "packages/autogen-studio" } dependencies = [ { name = "alembic" }, + { name = "autogen-agentchat" }, + { name = "autogen-core" }, + { name = "autogen-ext" }, { name = "fastapi" }, { name = "loguru" }, { name = "numpy" }, { name = "psycopg" }, { name = "pydantic" }, + { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "pyyaml" }, { name = "sqlmodel" }, { name = "typer" }, { name = "uvicorn" }, @@ -595,6 +606,9 @@ web = [ [package.metadata] requires-dist = [ { name = "alembic" }, + { name = "autogen-agentchat", editable = "packages/autogen-agentchat" }, + { name = "autogen-core", editable = "packages/autogen-core" }, + { name = "autogen-ext", editable = "packages/autogen-ext" }, { name = "fastapi" }, { name = "fastapi", marker = "extra == 'web'" }, { name = "loguru" }, @@ -602,7 +616,9 @@ requires-dist = [ { name = "psycopg" }, { name = "psycopg", marker = "extra == 'database'" }, { name = 
"pydantic" }, + { name = "pydantic-settings" }, { name = "python-dotenv" }, + { name = "pyyaml" }, { name = "sqlmodel" }, { name = "typer" }, { name = "uvicorn" }, @@ -2397,31 +2413,31 @@ wheels = [ [[package]] name = "mypy" -version = "1.10.0" +version = "1.13.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "mypy-extensions" }, { name = "tomli", marker = "python_full_version < '3.11'" }, { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/c3/b6/297734bb9f20ddf5e831cf4a83f422ddef5a29a33463999f0959d9cdc2df/mypy-1.10.0.tar.gz", hash = "sha256:3d087fcbec056c4ee34974da493a826ce316947485cef3901f511848e687c131", size = 3022145 } +sdist = { url = "https://files.pythonhosted.org/packages/e8/21/7e9e523537991d145ab8a0a2fd98548d67646dc2aaaf6091c31ad883e7c1/mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e", size = 3152532 } wheels = [ - { url = "https://files.pythonhosted.org/packages/69/82/2081dbfbbf1071e1370e57f9e327adeda060113688ec0d6bf7bbf4d7a5ad/mypy-1.10.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:da1cbf08fb3b851ab3b9523a884c232774008267b1f83371ace57f412fe308c2", size = 10819193 }, - { url = "https://files.pythonhosted.org/packages/e8/1b/b7c9caa89955a7d9c89eac79f31550f48f2c8233b5e12fe48ef55cd2e953/mypy-1.10.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:12b6bfc1b1a66095ab413160a6e520e1dc076a28f3e22f7fb25ba3b000b4ef99", size = 9970689 }, - { url = "https://files.pythonhosted.org/packages/15/ae/03d3f767f1ca5576970720ea551b43b79254d12998484d8f3e63fc07561e/mypy-1.10.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9e36fb078cce9904c7989b9693e41cb9711e0600139ce3970c6ef814b6ebc2b2", size = 12728098 }, - { url = "https://files.pythonhosted.org/packages/96/ba/8f5db8bd94c18d86033d09bbe634d471c1e9d7014cc621585973183ad1d0/mypy-1.10.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:2b0695d605ddcd3eb2f736cd8b4e388288c21e7de85001e9f85df9187f2b50f9", size = 12798838 }, - { url = "https://files.pythonhosted.org/packages/0e/ad/d476f1055deea6e63a91e065ba046a7ee494705574c4f9730de439172810/mypy-1.10.0-cp310-cp310-win_amd64.whl", hash = "sha256:cd777b780312ddb135bceb9bc8722a73ec95e042f911cc279e2ec3c667076051", size = 9365995 }, - { url = "https://files.pythonhosted.org/packages/86/ec/64ffed9ea554845ff846bd1f6fc7b07ab507be1d2e1b0d58790d7ac2ca4c/mypy-1.10.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3be66771aa5c97602f382230165b856c231d1277c511c9a8dd058be4784472e1", size = 10739848 }, - { url = "https://files.pythonhosted.org/packages/03/ac/f4fcb9d7a349953be5f4e78157a48b5110343a0e5228f77b3f7d1a1b8479/mypy-1.10.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:8b2cbaca148d0754a54d44121b5825ae71868c7592a53b7292eeb0f3fdae95ee", size = 9902362 }, - { url = "https://files.pythonhosted.org/packages/7e/36/ca2b82d89828f484f1a068d9e25c08840c4cc6f6549e7ea755f4391e351f/mypy-1.10.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1ec404a7cbe9fc0e92cb0e67f55ce0c025014e26d33e54d9e506a0f2d07fe5de", size = 12603712 }, - { url = "https://files.pythonhosted.org/packages/b5/7a/54edb45a41de3bc66e5c3d2b7512a392b3f0f8b9c3d9465b9a2456b6a115/mypy-1.10.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e22e1527dc3d4aa94311d246b59e47f6455b8729f4968765ac1eacf9a4760bc7", size = 12676904 }, - { url = 
"https://files.pythonhosted.org/packages/39/a5/e5aad5567ace09fcb179fbc3047cc2a6173743d84447b1ff71413e1a9881/mypy-1.10.0-cp311-cp311-win_amd64.whl", hash = "sha256:a87dbfa85971e8d59c9cc1fcf534efe664d8949e4c0b6b44e8ca548e746a8d53", size = 9355997 }, - { url = "https://files.pythonhosted.org/packages/30/30/6da95275426cfd21fc0c2e96d85a45d35fc4f7d37bd3286fa49f8f465447/mypy-1.10.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:a781f6ad4bab20eef8b65174a57e5203f4be627b46291f4589879bf4e257b97b", size = 10867123 }, - { url = "https://files.pythonhosted.org/packages/8c/d3/61cf1fae3b79d264f9f27de97e6e8fab8a37c85fdada5a46b6de333319f8/mypy-1.10.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b808e12113505b97d9023b0b5e0c0705a90571c6feefc6f215c1df9381256e30", size = 9859921 }, - { url = "https://files.pythonhosted.org/packages/08/5d/a46e5222bd69a873a896ab4f0b5948979e03dce46c7712ccaa5204ca8d02/mypy-1.10.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f55583b12156c399dce2df7d16f8a5095291354f1e839c252ec6c0611e86e2e", size = 12647776 }, - { url = "https://files.pythonhosted.org/packages/1d/6a/d8df60f2e48291f1a790ded56fd96421ac6a992f33c2571c0bdf0552d83a/mypy-1.10.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:4cf18f9d0efa1b16478c4c129eabec36148032575391095f73cae2e722fcf9d5", size = 12726191 }, - { url = "https://files.pythonhosted.org/packages/5a/93/9a015720bcf484d4202ea7fc5960c328c82d5eb1578950d586339ec15084/mypy-1.10.0-cp312-cp312-win_amd64.whl", hash = "sha256:bc6ac273b23c6b82da3bb25f4136c4fd42665f17f2cd850771cb600bdd2ebeda", size = 9450377 }, - { url = "https://files.pythonhosted.org/packages/e9/39/0148f7ee1b7f3a86d378a23b88cb85c432f83914ceb60364efa1769c598f/mypy-1.10.0-py3-none-any.whl", hash = "sha256:f8c083976eb530019175aabadb60921e73b4f45736760826aa1689dda8208aee", size = 2580084 }, + { url = "https://files.pythonhosted.org/packages/5e/8c/206de95a27722b5b5a8c85ba3100467bd86299d92a4f71c6b9aa448bfa2f/mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a", size = 11020731 }, + { url = "https://files.pythonhosted.org/packages/ab/bb/b31695a29eea76b1569fd28b4ab141a1adc9842edde080d1e8e1776862c7/mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80", size = 10184276 }, + { url = "https://files.pythonhosted.org/packages/a5/2d/4a23849729bb27934a0e079c9c1aad912167d875c7b070382a408d459651/mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7", size = 12587706 }, + { url = "https://files.pythonhosted.org/packages/5c/c3/d318e38ada50255e22e23353a469c791379825240e71b0ad03e76ca07ae6/mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f", size = 13105586 }, + { url = "https://files.pythonhosted.org/packages/4a/25/3918bc64952370c3dbdbd8c82c363804678127815febd2925b7273d9482c/mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372", size = 9632318 }, + { url = "https://files.pythonhosted.org/packages/d0/19/de0822609e5b93d02579075248c7aa6ceaddcea92f00bf4ea8e4c22e3598/mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d", size = 10939027 }, + { url = 
"https://files.pythonhosted.org/packages/c8/71/6950fcc6ca84179137e4cbf7cf41e6b68b4a339a1f5d3e954f8c34e02d66/mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d", size = 10108699 }, + { url = "https://files.pythonhosted.org/packages/26/50/29d3e7dd166e74dc13d46050b23f7d6d7533acf48f5217663a3719db024e/mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b", size = 12506263 }, + { url = "https://files.pythonhosted.org/packages/3f/1d/676e76f07f7d5ddcd4227af3938a9c9640f293b7d8a44dd4ff41d4db25c1/mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73", size = 12984688 }, + { url = "https://files.pythonhosted.org/packages/9c/03/5a85a30ae5407b1d28fab51bd3e2103e52ad0918d1e68f02a7778669a307/mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca", size = 9626811 }, + { url = "https://files.pythonhosted.org/packages/fb/31/c526a7bd2e5c710ae47717c7a5f53f616db6d9097caf48ad650581e81748/mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5", size = 11077900 }, + { url = "https://files.pythonhosted.org/packages/83/67/b7419c6b503679d10bd26fc67529bc6a1f7a5f220bbb9f292dc10d33352f/mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e", size = 10074818 }, + { url = "https://files.pythonhosted.org/packages/ba/07/37d67048786ae84e6612575e173d713c9a05d0ae495dde1e68d972207d98/mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2", size = 12589275 }, + { url = "https://files.pythonhosted.org/packages/1f/17/b1018c6bb3e9f1ce3956722b3bf91bff86c1cefccca71cec05eae49d6d41/mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0", size = 13037783 }, + { url = "https://files.pythonhosted.org/packages/cb/32/cd540755579e54a88099aee0287086d996f5a24281a673f78a0e14dba150/mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2", size = 9726197 }, + { url = "https://files.pythonhosted.org/packages/3b/86/72ce7f57431d87a7ff17d442f521146a6585019eb8f4f31b7c02801f78ad/mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a", size = 2647043 }, ] [[package]] @@ -3300,14 +3316,15 @@ wheels = [ [[package]] name = "pyright" -version = "1.1.378" +version = "1.1.389" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "nodeenv" }, + { name = "typing-extensions" }, ] -sdist = { url = "https://files.pythonhosted.org/packages/3d/f0/e8aa5555410d88f898bef04da2102b0a9bf144658c98d34872e91621ced2/pyright-1.1.378.tar.gz", hash = "sha256:78a043be2876d12d0af101d667e92c7734f3ebb9db71dccc2c220e7e7eb89ca2", size = 17486 } +sdist = { url = "https://files.pythonhosted.org/packages/72/4e/9a5ab8745e7606b88c2c7ca223449ac9d82a71fd5e31df47b453f2cb39a1/pyright-1.1.389.tar.gz", hash = "sha256:716bf8cc174ab8b4dcf6828c3298cac05c5ed775dda9910106a5dcfe4c7fe220", size = 21940 } wheels = [ - { url = 
"https://files.pythonhosted.org/packages/38/c6/f0d4bc20c13b20cecfbf13c699477c825e45767f1dc5068137323f86e495/pyright-1.1.378-py3-none-any.whl", hash = "sha256:8853776138b01bc284da07ac481235be7cc89d3176b073d2dba73636cb95be79", size = 18222 }, + { url = "https://files.pythonhosted.org/packages/1b/26/c288cabf8cfc5a27e1aa9e5029b7682c0f920b8074f45d22bf844314d66a/pyright-1.1.389-py3-none-any.whl", hash = "sha256:41e9620bba9254406dc1f621a88ceab5a88af4c826feb4f614d95691ed243a60", size = 18581 }, ] [[package]]